"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCAmelCase = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
UpperCAmelCase = """hopper-medium-v2"""
UpperCAmelCase = gym.make(env_name)
UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
UpperCAmelCase = env.reset()
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 1_000
UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
UpperCAmelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = env.step(denorm_actions)
UpperCAmelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
UpperCAmelCase = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
"""simple docstring"""
from __future__ import annotations
def lowercase ( a__ : list ) -> float:
if not nums:
raise ValueError('''List is empty''' )
return sum(a__ ) / len(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A = logging.getLogger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''token-classification'''
def __init__( self , _UpperCAmelCase ):
if type(_UpperCAmelCase ) == dict:
__a : Union[str, Any] = Namespace(**_UpperCAmelCase )
__a : Optional[int] = import_module('''tasks''' )
try:
__a : Any = getattr(_UpperCAmelCase , hparams.task_type )
__a : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
__a : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
__a : int = CrossEntropyLoss().ignore_index
super().__init__(_UpperCAmelCase , len(self.labels ) , self.mode )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return self.model(**_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__a : Optional[int] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__a : Union[str, Any] = self(**_UpperCAmelCase )
__a : List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
__a : Union[str, Any] = self._feature_file(_UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _UpperCAmelCase )
__a : Any = torch.load(_UpperCAmelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
__a : Any = self.token_classification_task.read_examples_from_file(args.data_dir , _UpperCAmelCase )
__a : Union[str, Any] = self.token_classification_task.convert_examples_to_features(
_UpperCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , _UpperCAmelCase )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ):
__a : Any = self._feature_file(_UpperCAmelCase )
logger.info('''Loading features from cached file %s''' , _UpperCAmelCase )
__a : Dict = torch.load(_UpperCAmelCase )
__a : int = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__a : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
__a : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
__a : Optional[Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
__a : Optional[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , batch_size=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""Compute validation""" ""
__a : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__a : str = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__a : List[str] = self(**_UpperCAmelCase )
__a , __a : Optional[int] = outputs[:2]
__a : Optional[int] = logits.detach().cpu().numpy()
__a : str = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[int] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
__a : str = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
__a : Tuple = np.argmax(_UpperCAmelCase , axis=2 )
__a : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
__a : Optional[Any] = dict(enumerate(self.labels ) )
__a : int = [[] for _ in range(out_label_ids.shape[0] )]
__a : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
__a : str = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(_UpperCAmelCase , _UpperCAmelCase ),
'''precision''': precision_score(_UpperCAmelCase , _UpperCAmelCase ),
'''recall''': recall_score(_UpperCAmelCase , _UpperCAmelCase ),
'''f1''': fa_score(_UpperCAmelCase , _UpperCAmelCase ),
}
__a : int = dict(results.items() )
__a : Any = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self , _UpperCAmelCase ):
# when stable
__a , __a , __a : Union[str, Any] = self._eval_end(_UpperCAmelCase )
__a : List[Any] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self , _UpperCAmelCase ):
# updating to test_epoch_end instead of deprecated test_end
__a , __a , __a : List[Any] = self._eval_end(_UpperCAmelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__a : Optional[int] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
# Add NER specific options
BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=_UpperCAmelCase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_UpperCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=_UpperCAmelCase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=_UpperCAmelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
A = NERTransformer.add_model_specific_args(parser, os.getcwd())
A = parser.parse_args()
A = NERTransformer(args)
A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
A = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__lowerCAmelCase = Features({'''text''': Value('''string''' )} )
__lowerCAmelCase = Features({} )
__lowerCAmelCase = "text"
@property
def _lowerCamelCase ( self ):
return {self.text_column: "text"}
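# Usage sketch (illustrative, not part of the original module): `column_mapping`
# tells `datasets` which column of a dataset feeds the template's canonical
# "text" slot. Assuming the class above is importable as LanguageModeling:
#
#     template = LanguageModeling(text_column="content")
#     assert template.column_mapping == {"content": "text"}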
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase (_snake_case ) -> str:
'''simple docstring'''
__UpperCamelCase = torch.load(_snake_case ,map_location="cpu" )
if "model" in sd.keys():
__UpperCamelCase = torch.load(_snake_case ,map_location="cpu" )["model"]
# pop unnecessary weights
__UpperCamelCase = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_snake_case )
__UpperCamelCase = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__UpperCamelCase = sd.pop(_snake_case )
__UpperCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__UpperCamelCase = sd[key]
# We split QKV in separate Q,K,V
__UpperCamelCase = key.replace(".qkv_proj." ,".q_proj." )
__UpperCamelCase = key.replace(".qkv_proj." ,".k_proj." )
__UpperCamelCase = key.replace(".qkv_proj." ,".v_proj." )
__UpperCamelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = torch.split(_snake_case ,depth // 3 ,dim=0 )
__UpperCamelCase = q
__UpperCamelCase = k
__UpperCamelCase = v
del sd[key]
return sd
@torch.no_grad()
def lowercase (_snake_case ,_snake_case ,_snake_case=None ) -> int:
'''simple docstring'''
__UpperCamelCase = load_checkpoint(_snake_case )
if config is not None:
__UpperCamelCase = OPTConfig.from_pretrained(_snake_case )
else:
__UpperCamelCase = OPTConfig()
__UpperCamelCase = OPTModel(_snake_case ).half().eval()
model.load_state_dict(_snake_case )
# Check results
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
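# Sanity sketch for the QKV split above (illustrative, not part of the original
# script): the fused projection stacks K, V, Q along dim 0, so splitting its
# first dimension into thirds recovers the three per-projection weights, e.g.
#
#     w = torch.arange(18.0).reshape(6, 3)   # pretend fused weight, depth = 6
#     k, v, q = torch.split(w, 6 // 3, dim=0)
#     assert k.shape == v.shape == q.shape == (2, 3)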
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_A = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The name of the task to train on.'} , )
_snake_case : Optional[List[str]] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
_snake_case : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
_snake_case : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Random seed for initialization.'} , )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> str:
'''simple docstring'''
__UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 )
if args.do_filter_by_confidence:
__UpperCamelCase = dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__UpperCamelCase = int(eval_result * len(_snake_case ) )
print(_snake_case )
__UpperCamelCase = dataset.sort("probability" ,reverse=_snake_case )
__UpperCamelCase = dataset.select(range(_snake_case ) )
__UpperCamelCase = dataset.remove_columns(["label", "probability"] )
__UpperCamelCase = dataset.rename_column("prediction" ,"label" )
__UpperCamelCase = dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__UpperCamelCase = dataset.shuffle(seed=args.seed )
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case ,index=_snake_case )
else:
dataset.to_json(_snake_case )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ) -> List[str]:
'''simple docstring'''
__UpperCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__UpperCamelCase = STModelArguments(model_name_or_path=_snake_case )
__UpperCamelCase = STDataArguments(train_file=_snake_case ,infer_file=_snake_case )
__UpperCamelCase = STTrainingArguments(output_dir=_snake_case )
__UpperCamelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case ,_snake_case ,_snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case ,_snake_case ):
setattr(_snake_case ,_snake_case ,_snake_case )
# Sanity checks
__UpperCamelCase = {}
__UpperCamelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__UpperCamelCase = args.train_file
__UpperCamelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__UpperCamelCase = args.eval_file
for key in data_files:
__UpperCamelCase = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
__UpperCamelCase = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
__UpperCamelCase = f"""{args.output_dir}/self-train_iter-{{}}""".format
__UpperCamelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir ,exist_ok=_snake_case )
os.makedirs(_snake_case ,exist_ok=_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = False
# Show the progress bar
__UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
__UpperCamelCase = data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__UpperCamelCase = os.path.join(_snake_case ,"stage-1" )
__UpperCamelCase = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case ,_snake_case ):
arguments_dict.update({key: value} )
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 1." ,_snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" )
__UpperCamelCase = os.path.join(_snake_case ,"stage-2" )
# Update arguments_dict
__UpperCamelCase = model_path
__UpperCamelCase = data_files["train"]
__UpperCamelCase = current_output_dir
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 2." ,_snake_case )
__UpperCamelCase = iteration
__UpperCamelCase = data_dir_format(iteration + 1 )
__UpperCamelCase = AutoConfig.from_pretrained(os.path.join(_snake_case ,"best-checkpoint" ) )
__UpperCamelCase = config.idalabel
__UpperCamelCase = os.path.join(_snake_case ,"eval_results_best-checkpoint.json" )
__UpperCamelCase = os.path.join(_snake_case ,"test_results_best-checkpoint.json" )
assert os.path.exists(_snake_case )
with open(_snake_case ,"r" ) as f:
__UpperCamelCase = float(json.load(_snake_case )[args.eval_metric] )
__UpperCamelCase = os.path.join(_snake_case ,"infer_output_best-checkpoint.csv" )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__UpperCamelCase = load_dataset(args.data_file_extension ,data_files={"data": data_files["infer"]} )["data"]
__UpperCamelCase = load_dataset("csv" ,data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_snake_case ,exist_ok=_snake_case )
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__UpperCamelCase = eval_result
if best_iteration is None:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
__UpperCamelCase = 0
else:
if new_eval_result == best_eval_result:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__UpperCamelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" ,_snake_case )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" ,args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
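# Invocation sketch (illustrative, paths and column layout are assumptions that
# must match what the local `finetuning.finetune` expects):
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",
#         infer_file="data/infer.csv",
#         output_dir="output",
#         evaluation_strategy="no",
#     )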
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
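# Shape sketch (illustrative, untested assumption about the default block
# configuration): the model maps a (batch, in_channels, length) sequence plus a
# timestep to an output of the same length, e.g.
#
#     model = UNet1DModel(sample_size=1024, in_channels=2, out_channels=2)
#     out = model(torch.randn(1, 2, 1024), timestep=10).sample   # -> (1, 2, 1024)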
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq weights into the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
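# Example invocation (illustrative; the script name and all paths are
# placeholders, not from the original file):
#
#     python convert_wav2vec2_seq2seq.py \
#         --checkpoint_path /path/to/checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./converted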
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
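# Usage sketch (illustrative, not part of the original module): here the
# mapping renames two dataset columns into the template's canonical slots.
#
#     template = Summarization(text_column="article", summary_column="highlights")
#     assert template.column_mapping == {"article": "text", "highlights": "summary"}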
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any `GenerationConfig` value with its dict representation so the result
        can be shared or saved as JSON.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
def max_product_subarray(numbers: list) -> int:
    """
    Returns the maximum product obtainable by multiplying a contiguous
    subarray of the given integer list `numbers`.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
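# Run the doctests above when executed directly (added for convenience).
if __name__ == "__main__":
    import doctest

    doctest.testmod()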
def solution(pence: int = 200) -> int:
    """
    Returns the number of different ways to make `pence` pence using any number
    of standard British coins (1p, 2p, 5p, 10p, 20p, 50p, 100p, 200p), via a
    classic bottom-up dynamic programme over coin denominations.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
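    # Small hand-checkable case (added for illustration): 5p can be formed as
    # 1+1+1+1+1, 1+1+1+2, 1+2+2, or 5, i.e. four ways.
    assert solution(5) == 4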
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Test `FeaturesManager.determine_framework`."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_framework_from_env(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 255"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    """DDIM step that clips the predicted x_0 to the bit scale."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
):
    """DDPM step that clips the predicted x_0 to the bit scale."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
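# Round-trip sanity sketch for the bit codecs above (illustrative, not part of
# the original pipeline): encoding an image tensor to {-1, 1} bit planes and
# decoding back reproduces the input up to 8-bit quantization, e.g.
#
#     x = torch.rand(1, 3, 4, 4)
#     assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255)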
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
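# Added usage note: the mixin above exercises both serialization paths that a
# feature extractor supports. A minimal sketch of the round trip, with a
# hypothetical `MyFeatureExtractor` subclass used purely for illustration:
#   feat_extract = MyFeatureExtractor(sampling_rate=16_000)
#   feat_extract.to_json_file("feat_extract.json")                 # single JSON file
#   restored = MyFeatureExtractor.from_json_file("feat_extract.json")
#   feat_extract.save_pretrained("my_dir")                         # directory layout
#   restored = MyFeatureExtractor.from_pretrained("my_dir")
#   assert restored.to_dict() == feat_extract.to_dict()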
| 704
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root can only be bracketed if f(a) and f(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
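# Added note: bisection halves the bracket every iteration, so reaching a
# tolerance eps from an initial bracket (a, b) takes about
#   n >= log2((b - a) / eps)
# iterations; e.g. bisection(-2, 5) with eps = 0.01 needs ~log2(700) ≈ 10 steps.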
| 453
| 0
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
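# Added cross-check (illustrative): by Binet's formula F(n) ≈ phi**n / sqrt(5)
# with phi = (1 + sqrt(5)) / 2, the digit count of F(n) is approximately
# n * log10(phi) - log10(sqrt(5)) + 1, so solving for 1000 digits gives the
# well-known answer n = 4782 without materializing the sequence:
#   from math import log10, sqrt
#   phi = (1 + sqrt(5)) / 2
#   n = 2
#   while n * log10(phi) - log10(sqrt(5)) + 1 < 1000:
#       n += 1
#   print(n)  # 4782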
| 541
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 541
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def _snake_case (self ):
pass
@unittest.skip
def _snake_case (self ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def _snake_case (self ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold only has one output format.""" )
def _snake_case (self ):
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def _snake_case (self ):
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def _snake_case (self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case (self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 96
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
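# Added note: both implementations rest on the identity gcd(a, b) = gcd(b, a % b),
# which holds because any common divisor of a and b also divides a - q*b = a % b.
# Worked trace for euclidean_gcd(6, 3): (6, 3) -> (3, 0) -> 3.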
| 96
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read in JSON."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 256
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
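# Added note: `pad_across_processes` pads every process's tensor along dim 0 to the
# largest size found across processes (here rank r holds r + 2 rows). A minimal
# single-process sketch of the semantics verified above, using plain torch only:
#   local = torch.randint(0, 10, (3, 10))      # this rank's tensor
#   max_rows = 4                               # longest dim-0 across all ranks
#   padded = torch.zeros((max_rows, 10), dtype=local.dtype)
#   padded[:3] = local                         # default: data first, zero-padding after
#   # pad_first=True puts the zero-padding before the data instead:
#   # padded[max_rows - 3 :] = local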
| 256
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 129
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50_257,
        n_positions: int = 1_024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)
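    # Added note: `encode_prefix` / `decode_prefix` above adapt an external feature
    # vector (e.g. a CLIP embedding of width `prefix_inner_dim`) into the GPT-2
    # embedding space of width `n_embd`, optionally through a bottleneck of width
    # `prefix_hidden_dim`. The decoded prefix is concatenated in front of the
    # caption's token embeddings in `forward` below, so the language model
    # conditions on the feature exactly like extra context tokens.
    # Illustrative shapes (hypothetical sizes): (batch, prefix_length, 512)
    # -> encode -> (batch, prefix_length, prefix_hidden_dim)
    # -> decode -> (batch, prefix_length, 768).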
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
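    # Added note: the search above ranks beams by length-normalized log-probability,
    #   score(beam) = (1 / L) * sum_t log p(token_t | prefix, token_<t),
    # accumulated in `scores_sum_average`; finished beams are frozen by forcing all
    # their logits to -inf except one zero entry, so their score stops changing.
    # Illustrative call (hypothetical tensors, not executed here):
    #   prefix = decoder.decode_prefix(clip_feature)  # (1, prefix_length, n_embd)
    #   tokens, lengths = decoder.generate_beam(
    #       input_embeds=prefix, device=prefix.device, eos_token_id=50256
    #   )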
| 129
| 1
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
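# Added usage example (illustrative): integrate y' = y from x = 0 to 1; the
# endpoint should approach e ≈ 2.71828 as the step size h shrinks.
#   y = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
#   print(y[-1])  # ~2.7182..., close to np.e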
| 22
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
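    # Added note: `_check_zero_mean_unit_variance` above verifies the extractor's
    # utterance-level cepstral mean and variance normalization (CMVN): every
    # feature dimension is shifted and scaled to mean 0 / variance 1 over the
    # valid (unpadded) frames. Minimal standalone sketch with numpy:
    #   feats = np.random.rand(584, 24).astype(np.float32)
    #   normed = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)
    #   assert np.allclose(normed.mean(axis=0), 0, atol=1e-3)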
| 457
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
def _lowerCAmelCase ( self ):
# Initialize image_processor
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
_lowerCamelCase : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Optional[Any] = image_processor(
A , return_tensors='pt' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self ):
# Initialize image_processor
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_lowerCamelCase : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Tuple = image_processor(
A , return_tensors='pt' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
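# test_expected_patches above pins the whole preprocessing pipeline to a known value:
# 0.0606 is the precomputed mean of the normalized flattened patches for the fixture
# image, so any change to resizing, normalization or patch flattening trips the check.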
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(SCREAMING_SNAKE_CASE_, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
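# Flattened-patch width sanity note: Pix2Struct unrolls each patch to
# patch_height * patch_width * num_channels values and prepends the patch's
# (row, column) position, which is the `+ 2` in expected_hidden_dim. The
# four-channel test uses `num_channels - 1` because `do_convert_rgb` drops the
# alpha channel before flattening.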
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
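# Note on test_visual_prompt above: CLIPSeg can be conditioned on a prompt image
# instead of text, so the processor returns the prompt image under
# "conditional_pixel_values" alongside the query image's "pixel_values".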
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts.
        # NB: the literal characters were garbled in this copy; the explicit escapes
        # below (NBSP, the U+2000-series spaces, zero-width space) are an assumption.
        # fmt: off
        self.whitespaces = {
            "\u00a0", "\u2000", "\u2001", "\u2002", "\u2003", "\u2004",
            "\u2005", "\u2006", "\u2007", "\u2008", "\u2009", "\u200b",
        }
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes a sequence of token ids to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
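# Rough usage sketch (illustrative; assumes network access to the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP above):
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer.encode_fast("Träd är fina", return_tensors="pt")  # torch.Tensor
#     text = tokenizer.decode_fast(ids.tolist())
# encode_fast/decode_fast talk to the SentencePiece model directly, skipping the
# special-token bookkeeping of the regular encode/decode path, which makes them
# faster but less general.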
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
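# TvltProcessor is a thin router: audio kwargs go to TvltFeatureExtractor and image
# kwargs to TvltImageProcessor, so the sum-comparison tests above only check that
# routing through the processor is lossless, not the extractors themselves.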
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
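# Id layout note for the assertions above: BartphoTokenizer reserves ids 0-3 for the
# special tokens (<s>, <pad>, </s>, <unk>), so the five monolingual test tokens map to
# ids 4-8 and every out-of-vocabulary piece ("▁l", "à") collapses to <unk> = 3.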
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark the two triangle builders against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
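# Quick sanity check (illustrative, not part of the original module):
#     generate_pascal_triangle(4)           -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#     generate_pascal_triangle_optimized(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# The optimized builder exploits the fact that each row is a palindrome: it computes
# only the first half of the row and mirrors it.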
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random bases."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap trial division against primes below 1000, then fall back to rabin_miller."""
    if num < 2:
        return False

    # fmt: off
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    # fmt: on
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Sample random odd-sized integers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
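# Example session (illustrative; key size reduced for speed):
#     is_prime_low_num(997)     -> True   (direct hit in the low-primes table)
#     is_prime_low_num(998)     -> False  (divisible by 2)
#     generate_large_prime(128) -> a random 128-bit probable prime
# rabin_miller is probabilistic: with 5 independent random bases, a composite
# survives all rounds with probability at most 4**-5 (about 0.1%).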
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve for the one unknown among conductivity, electron concentration and
    mobility, given the other two (pass 0 for the unknown)."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
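# Worked example (numbers are illustrative):
#     electrical_conductivity(conductivity=25, electron_conc=100, mobility=0)
#     -> ("mobility", 25 / (100 * 1.6021e-19)) ≈ ("mobility", 1.56e18)
# Exactly one argument must be 0; it marks the unknown being solved for.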
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )

        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
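# The slow test compares renders with assert_mean_pixel_difference rather than exact
# equality: GPU rendering is not bitwise deterministic across runs and drivers, so an
# average per-pixel tolerance is the stable check.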
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
@cached_property
    def big_tokenizer(self):
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
_lowercase: Dict = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
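# XLMProphetNet inherits fairseq's vocabulary layout, so every raw SentencePiece id
# is shifted by `tokenizer.fairseq_offset` before use; that is why the expected id
# lists in test_full_tokenizer add the offset instead of hard-coding final ids.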
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
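# Rough usage sketch (checkpoint name assumed; any OWL-ViT checkpoint works the same):
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")
# Nested text lists are padded with " " entries up to the longest query set in the
# batch, matching the padding loop in __call__ above.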
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A__ : List[Any] = parser.parse_args()
if args.model_type == "bert":
A__ : Dict = BertForMaskedLM.from_pretrained(args.model_name)
A__ : Optional[Any] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
A__ : List[Any] = model.state_dict()
A__ : Any = {}
for w in ["word_embeddings", "position_embeddings"]:
A__ : Optional[Any] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
A__ : Dict = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
A__ : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
A__ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
A__ : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
A__ : int = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
A__ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
A__ : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
A__ : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
A__ : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
A__ : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
A__ : Optional[int] = state_dict['cls.predictions.decoder.weight']
A__ : int = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
A__ : Optional[Any] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
A__ : Tuple = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
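# Teacher layers [0, 2, 4, 7, 9, 11] of the 12-layer BERT are copied into student
# layers 0..5, the usual "take alternating layers" initialization that DistilBERT
# training starts from; the MLM head (vocab_projector) is transferred as well.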
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = '''dpt'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''')
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self) -> str:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
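

# Editorial usage sketch (not part of the original file): instantiating the config in
# hybrid mode and round-tripping it through `to_dict`, which exercises the
# backbone-serialization branch above. The asserted key is an assumption about what
# `BitConfig.to_dict()` emits.
def _dpt_hybrid_config_example():
    config = DPTConfig(is_hybrid=True)  # defaults above create a BiT backbone config
    as_dict = config.to_dict()
    assert as_dict["backbone_config"]["layer_type"] == "bottleneck"
    return as_dict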
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
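

# Editorial usage sketch (assumption: `data_dir` points at an MRPC-style folder of
# .tsv files; the model name is an arbitrary illustrative choice).
def _glue_dataset_example(data_dir: str):
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir=data_dir, max_seq_length=128)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.dev)
    return len(dataset), dataset[0]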
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-Algorithm for finding an (approximate) minimum vertex cover."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str) -> None:
    def get_masked_lm_array(name: str):
        full_name = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape):
        full_name = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(F'''Loading model based on config from {config_path}...''')
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, '''_query_dense/kernel''', self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, '''_query_dense/bias''', self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, '''_key_dense/kernel''', self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, '''_key_dense/bias''', self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, '''_value_dense/kernel''', self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, '''_value_dense/bias''', self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, '''_output_dense/kernel''', self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, '''_output_dense/bias''', self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '''_attention_layer_norm/gamma''')
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '''_attention_layer_norm/beta''')

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, '''_intermediate_dense/kernel''')
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, '''_intermediate_dense/bias''')

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, '''_output_dense/kernel''')
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, '''_output_dense/bias''')

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '''_output_layer_norm/gamma''')
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '''_output_layer_norm/beta''')

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('''_position_embedding_layer/embeddings''')
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('''_type_embedding_layer/embeddings''')
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('''_embedding_norm_layer/gamma''')
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('''_embedding_norm_layer/beta''')

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array('''dense/kernel''')
    lm_head.dense.bias.data = get_masked_lm_array('''dense/bias''')
    lm_head.LayerNorm.weight.data = get_masked_lm_array('''layer_norm/gamma''')
    lm_head.LayerNorm.bias.data = get_masked_lm_array('''layer_norm/beta''')
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('''embedding_table''')

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array('''_pooler_layer/kernel''')
    model.bert.pooler.dense.bias.data = get_encoder_array('''_pooler_layer/bias''')

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print('''Model conversion was done successfully!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    """Mahalanobis distance metric."""

    def _info(self) -> Dict:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''', id='''sequence'''), id='''X'''),
                }
            ),
        )

    def _compute(self, X, reference_distribution) -> Any:
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
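

# Editorial worked example (not part of the metric): the same computation with plain
# NumPy, for a single point against a two-point reference distribution. It reproduces
# the docstring example result of array([0.5]).
def _mahalanobis_numpy_example():
    X = np.array([[0.0, 1.0]])
    reference = np.array([[0.0, 1.0], [1.0, 0.0]])
    delta = X - np.mean(reference)
    cov = np.cov(reference.T)
    inv_cov = np.linalg.pinv(cov)  # pinv: the 2-point covariance here is singular
    return np.dot(np.dot(delta, inv_cov), delta.T).diagonal()  # array([0.5])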
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self) -> str:
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs) -> Dict:
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> Tuple:
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs) -> Union[str, Any]:
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> List[str]:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self) -> str:
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self) -> List[str]:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self) -> List[Any]:
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self) -> List[str]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self) -> int:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self) -> int:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self) -> Any:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self) -> Optional[int]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = """Create a default config file for Accelerate with only a few flags set."""


def write_basic_config(mixed_precision: str = "no", save_location: str = default_json_config_file, use_xpu: bool = False) -> Tuple:
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.')
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}')
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = 'MULTI_GPU'
        else:
            config["distributed_type"] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = 'MULTI_XPU'
        else:
            config["distributed_type"] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = 'MULTI_NPU'
        else:
            config["distributed_type"] = 'NO'
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents) -> Any:
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file', default=default_json_config_file, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), dest='save_location', )
    parser.add_argument(
        '--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no', )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args) -> Union[str, Any]:
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(F'accelerate configuration saved at {config_file}')
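

# Editorial usage sketch (not part of the original module): writing a default config
# into a temporary directory; the file name is an arbitrary illustrative choice.
def _write_basic_config_example():
    import os
    import tempfile

    target = os.path.join(tempfile.mkdtemp(), "default_config.json")
    return write_basic_config(mixed_precision="no", save_location=target)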
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = """facebook/wmt19-en-de"""

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")

# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)

print("""test output:""", len(outputs.logits[0]))

# Save
mname_tiny = """tiny-wmt19-en-de"""
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
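

# Editorial worked example: 25,000 borrowed at 12% p.a. over 3 years gives a monthly
# rate of 0.01 and 36 payments, so the closed form above yields roughly 830.36/month.
def _emi_example():
    emi = equated_monthly_installments(25000, 0.12, 3)
    assert abs(emi - 830.36) < 0.01
    return emi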
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Read the content of the downloaded artifact zips of the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
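

# Editorial usage sketch (assumptions: a GitHub token in the environment and
# "test_reports" as an artifact name; both are illustrative, not from the original).
def _fetch_reports_example():
    token = os.environ.get("GITHUB_TOKEN")
    os.makedirs("ci_reports", exist_ok=True)
    return get_last_daily_ci_reports(["test_reports"], output_dir="ci_reports", token=token)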
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='text-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    text_column: str = 'text'
    label_column: str = 'labels'

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
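

# Editorial usage sketch (not part of the original module): aligning the template with
# a concrete ClassLabel feature, which specializes `label_schema` as shown above.
def _text_classification_align_example():
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    task = TextClassification(text_column="text", label_column="labels")
    return task.align_with_features(features).label_schema  # labels now carry the two class names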
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars() -> Union[str, Any]:
    """Gets raw characters from input."""
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["""esc"""])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character() -> List[str]:
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
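

# Editorial usage sketch: a tiny echo loop on top of `get_character`, exiting on
# Ctrl-C (the "interrupt" entry in KEYMAP).
def _key_echo_loop():
    while True:
        char = get_character()
        if char == chr(KEYMAP["interrupt"]):
            break
        print(repr(char))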
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
'''simple docstring'''
def combination_util(arr: Optional[Any], n: Union[str, Any], r: Dict, index: List[str], data: List[str], i: List[str]):
    """Recursively build and print all combinations of size r from arr."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr: Optional[Any], n: Tuple, r: Union[str, Any]):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the "rat in maze" problem: print the path if one exists and return whether it does."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("""\n""".join(str(row) for row in solutions))
    else:
        print("""No solution exists!""")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracker: try all four directions from (i, j), marking visited cells."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
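

# Editorial worked example: a 4x4 maze (0 = open cell, 1 = wall) that the backtracker
# above can solve; the layout is an arbitrary illustrative choice.
def _maze_example():
    maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    return solve_maze(maze)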
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """adapter_layer""": """encoder.layers.*.adapter_layer""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
    """pooling_layer.linear""": """projector""",
    """pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """projector""",
    """classifier""",
]
def read_txt_into_dict(filename: str) -> Any:
    result = {}
    with open(filename, """r""") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
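

# Editorial worked example (assumption about the label-file format: one label per
# non-empty line, first whitespace-separated token kept). A file containing
# "down" and "up" on two lines maps to {0: "down", 1: "up"}.
def _read_txt_into_dict_example():
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("down\nup\n")
    return read_txt_into_dict(f.name)  # {0: "down", 1: "up"}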
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> Tuple:
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""")[-1]]
            weight_type = """param"""

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("""."""):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("""."""):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def rename_dict(key, value, full_name, weight_type, hf_dict) -> str:
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""")[-1]]
            weight_type = """param"""

    if weight_type is not None and weight_type != "param":
        full_key = """.""".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = """.""".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if """lm_head""" in full_key else value[0]


PARAM_MAPPING = {
    """W_a""": """linear_1.weight""",
    """W_b""": """linear_2.weight""",
    """b_a""": """linear_1.bias""",
    """b_b""": """linear_2.bias""",
    """ln_W""": """norm.weight""",
    """ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None) -> Optional[Any]:
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(""".""")[-2]
                mapped_key = mapped_key.replace("""*""", layer_index)
            if "weight_g" in name:
                weight_type = """weight_g"""
            elif "weight_v" in name:
                weight_type = """weight_v"""
            elif "bias" in name:
                weight_type = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = """weight"""
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless) -> Tuple:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> str:
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False) -> Optional[Any]:
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 198
| 1
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = """efficientformer"""

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [4_8, 9_6, 2_2_4, 4_4_8],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 4_4_8,
        key_dim: int = 3_2,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 1_6,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1E-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-1_2,
        image_size: int = 2_2_4,
        batch_norm_eps: float = 1E-0_5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 131
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 131
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and brightly lit night, with many bright buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
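# (channels_last memory layout typically speeds up convolution-heavy modules on CPU)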
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
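# trace-friendly example inputs for the UNet: latents (2, 4, 64, 64), a timestep,
# and CLIP hidden states (2, 77, 768), matching 512x512 generation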
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 241
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
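
# helpers translating between the (alpha, sigma) noise parametrization used by
# audio-diffusion's "crash" schedule and the scalar diffusion time t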
def alpha_sigma_to_t( alpha , sigma ):
    '''simple docstring'''
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object(object):
    '''simple docstring'''

    pass
class DiffusionUncond(nn.Module):
    '''simple docstring'''

    def __init__( self , global_args ):
        '''simple docstring'''
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    '''simple docstring'''
    url = MODELS_MAP[model_name]['''url''']
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
MID_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
UP_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
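
# the maps above translate k-diffusion layer indices/names into diffusers module
# paths; the helpers below apply them to individual state-dict keys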
def convert_resconv_naming( name ):
    '''simple docstring'''
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )

    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(f'ResConvBlock error with {name}' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    '''simple docstring'''
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f'Attn error with {name}' )
def rename( input_string , max_depth=13 ):
    '''simple docstring'''
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights( state_dict ):
    '''simple docstring'''
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , name , v ):
    '''simple docstring'''
    if len(name ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    '''simple docstring'''
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 1_00
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(diffusion_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
| 241
| 1
|
import os
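
# Project Euler 22: sum each name's alphabetical value times its 1-based position
# in the sorted list of names.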
def solution():
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("""\"""", """""" ).split(""",""" )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 529
|
from __future__ import annotations
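
# generate every permutation of `sequence` by backtracking over unused indices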
def generate_all_permutations( sequence : list[int | str] ) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )


def create_state_space_tree(
    sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return

    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_cycle(self ):
        """simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_cycle_fp16(self ):
        """simple docstring"""
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , '''half''' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowercase (self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        init_image = init_image.resize((5_12, 5_12) )

        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='''fp16''' )

        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1
    def test_cycle_diffusion_pipeline(self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        init_image = init_image.resize((5_12, 5_12) )

        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )

        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images

        assert np.abs(image - expected_image ).max() < 2e-2
| 705
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = '''glpn'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 628
| 0
|
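# Towers of Hanoi: recursively move `height` disks from one pole to another,
# using the third pole as scratch space.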
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )


def move_disk(fp: str, tp: str) -> None:
    print('moving disk from' , fp , 'to' , tp )


def main() -> None:
    height = int(input('Height of hanoi: ' ).strip() )
    move_tower(height , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 31
|
import copy
import random
from transformers import CLIPTokenizer
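
# CLIPTokenizer wrapper for multi-vector textual inversion: one placeholder word
# can expand to several learned tokens at encode time.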
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}

    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                """ `placeholder_token` that is not already in the tokenizer.""" )

    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}; keep placeholder tokens independent.""" )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , """ """.join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 428
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
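
# Community variant of the Stable Diffusion pipeline: a fixed 64x64 reference
# latent is aligned into the target-resolution latents so that one seed yields
# similar compositions across output sizes.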
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class UpperCamelCase__ (DiffusionPipeline ):
    '''simple docstring'''

    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )

    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = 7.5 ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = 1
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = len(_lowerCAmelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_lowerCAmelCase )}.''' )
# get prompt text embeddings
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
lowerCamelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = text_embeddings.shape
lowerCamelCase__ = text_embeddings.repeat(1 ,_lowerCAmelCase ,1 )
lowerCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt ,_lowerCAmelCase ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ = 42
if negative_prompt is None:
lowerCamelCase__ = [""""""]
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCAmelCase )} !='''
F''' {type(_lowerCAmelCase )}.''' )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = text_input_ids.shape[-1]
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ = uncond_embeddings.shape[1]
lowerCamelCase__ = uncond_embeddings.repeat(_lowerCAmelCase ,_lowerCAmelCase ,1 )
lowerCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt ,_lowerCAmelCase ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase__ = torch.randn(
_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(self.device )
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
self.device )
else:
lowerCamelCase__ = torch.randn(
_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase__ = latents_reference.to(self.device )
lowerCamelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase__ = 0 if dx < 0 else dx
lowerCamelCase__ = 0 if dy < 0 else dy
lowerCamelCase__ = max(-dx ,0 )
lowerCamelCase__ = max(-dy ,0 )
lowerCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_eta:
lowerCamelCase__ = eta
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 1 / 0.1_8215 * latents
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase__ = self.feature_extractor(self.numpy_to_pil(_lowerCAmelCase ) ,return_tensors="""pt""" ).to(
self.device )
lowerCamelCase__ , lowerCamelCase__ = self.safety_checker(
images=_lowerCAmelCase ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase__ = None
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
| 9
|
'''simple docstring'''
from __future__ import annotations
import math
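
# Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite
# that cannot be written as a prime plus twice a square.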
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums( n : int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )

    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 9
| 1
|
import math
import tensorflow as tf
from packaging import version
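
# TensorFlow ports of the activation functions used across transformers models;
# ACT2FN at the bottom maps config strings to these callables.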
def _gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf


def _gelu_new(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf


def mish(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x ) )


def gelu_fast(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715 , x.dtype )
    coeff2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))


def quick_gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )


def gelu_10(x):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )


def glu(x , axis=-1 ):
    '''simple docstring'''
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        '''simple docstring'''
        return tf.keras.activations.gelu(x , approximate=True )

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
'gelu': gelu,
    'gelu_10': gelu_10,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    '''simple docstring'''
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}''' )
| 106
|
from __future__ import annotations
import numpy as np
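
# ReLU: element-wise max(0, x)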
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 551
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
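
# BridgeTowerProcessor pairs a Roberta tokenizer with the BridgeTower image
# processor behind a single __call__ that returns one combined BatchEncoding.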
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 161
|
import unittest
import numpy as np
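
# For the block matrix M = [[A, B], [B^T, C]], the Schur complement of A is
# C - B^T A^{-1} B, and det(M) = det(A) * det(M/A); the tests below verify this.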
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )

    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )

    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )

        input_matrix = np.block([[a, b], [b.T, c]] )

        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )

        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def test_improper_b_c_dimensions(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 161
| 1
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
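
# Trainer subclass that adds post-training-quantization calibration around the
# standard evaluate/predict loops (the actual hooks live in quant_trainer).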
class QuestionAnsweringTrainer(Trainer):
    def __init__(self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self , calib_dataset=None ):
        '''simple docstring'''
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset." )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset , description="Calibration" )

        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )
    def calibrate(self , calib_dataset=None ):
        '''simple docstring'''
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )

        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )

        logger.info("***** Running calibration *****" )
        logger.info(F"""  Num examples = {self.calib_num}""" )
        logger.info(F"""  Batch size = {calib_dataloader.batch_size}""" )

        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss , logits , labels = self.prediction_step(model , inputs , prediction_loss_only=False )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: List[str] = "eval" ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
_lowerCamelCase : List[Any] = self.get_eval_dataloader(_lowerCamelCase )
_lowerCamelCase : Tuple = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Optional[Any] = self.compute_metrics
_lowerCamelCase : int = None
_lowerCamelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : str = eval_loop(
_lowerCamelCase ,description="Evaluation" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_lowerCamelCase ,)
finally:
_lowerCamelCase : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_lowerCamelCase : int = self.post_process_function(_lowerCamelCase ,_lowerCamelCase ,output.predictions )
_lowerCamelCase : List[Any] = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_lowerCamelCase : Optional[int] = metrics.pop(_lowerCamelCase )
self.log(_lowerCamelCase )
else:
_lowerCamelCase : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_lowerCamelCase : Optional[int] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_lowerCamelCase )
return metrics
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: int = "test" ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_test_dataloader(_lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Union[str, Any] = self.compute_metrics
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : Optional[Any] = eval_loop(
_lowerCamelCase ,description="Prediction" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_lowerCamelCase ,)
finally:
_lowerCamelCase : Dict = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowerCamelCase : Dict = self.post_process_function(_lowerCamelCase ,_lowerCamelCase ,output.predictions ,"predict" )
_lowerCamelCase : Any = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_lowerCamelCase : Any = metrics.pop(_lowerCamelCase )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_lowerCamelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Tuple="./" ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.eval_dataset
_lowerCamelCase : int = self.get_eval_dataloader(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = next(iter(_lowerCamelCase ) )
# saving device - to make it consistent
_lowerCamelCase : int = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
_lowerCamelCase : List[Any] = tuple(v.to(_lowerCamelCase ) for k, v in batch.items() )
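# note: converting the dict to a tuple assumes the batch preserves the (input_ids, attention_mask, token_type_ids) order expected by the exported signature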
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
_lowerCamelCase : Optional[Any] = True
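# the flag above corresponds to TensorQuantizer.use_fb_fake_quant in the upstream script; presumably it makes quantizers export as ONNX-compatible fake-quant ops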
_lowerCamelCase : Tuple = self.model.to(_lowerCamelCase )
model.eval()
model.float()
_lowerCamelCase : str = model.module if hasattr(_lowerCamelCase ,"module" ) else model
quant_trainer.configure_model(_lowerCamelCase ,self.quant_trainer_args )
_lowerCamelCase : List[str] = os.path.join(_lowerCamelCase ,"model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
_lowerCamelCase : Optional[Any] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,export_params=_lowerCamelCase ,opset_version=13 ,do_constant_folding=_lowerCamelCase ,input_names=["input_ids", "attention_mask", "token_type_ids"] ,output_names=["output_start_logits", "output_end_logits"] ,dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} ,verbose=_lowerCamelCase ,)
logger.info("onnx export finished" )
| 46
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 182
| 0
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__lowerCamelCase : List[str] = get_logger(__name__)
class _lowercase :
_a : str = 'dummy_data'
_a : Optional[int] = 'datasets'
_a : Optional[Any] = False
def __init__( self , a , a , a , a = None , a = False , a = True , a = None , ):
snake_case__ : List[Any] =0
snake_case__ : str =dataset_name
snake_case__ : int =cache_dir
snake_case__ : int =use_local_dummy_data
snake_case__ : int =config
# download_callbacks take a single url as input
snake_case__ : List[Callable] =download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : int =load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : List[Any] =str(a )
# to be downloaded
snake_case__ : Optional[int] =None
snake_case__ : Dict =None
@property
def lowercase__ ( self ):
if self._dummy_file is None:
snake_case__ : Dict =self.download_dummy_data()
return self._dummy_file
@property
def lowercase__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowercase__ ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowercase__ ( self ):
snake_case__ : Optional[Any] =(
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Union[str, Any] =cached_path(
a , cache_dir=self.cache_dir , extract_compressed_file=a , force_extract=a )
return os.path.join(a , self.dummy_file_name )
@property
def lowercase__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase__ ( self ):
if self._bucket_url is None:
snake_case__ : Any =hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowercase__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowercase__ ( self , a , *a ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : int =self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : Optional[Any] =self.dummy_file_name
# special case when data_url is a dict
if isinstance(a , a ):
return self.create_dummy_data_dict(a , a )
elif isinstance(a , (list, tuple) ):
return self.create_dummy_data_list(a , a )
else:
return self.create_dummy_data_single(a , a )
def lowercase__ ( self , a , *a ):
return self.download_and_extract(a )
def lowercase__ ( self , a , a ):
return self.download_and_extract(a )
def lowercase__ ( self , a , *a , **a ):
return path
def lowercase__ ( self ):
return {}
def lowercase__ ( self , a , a ):
snake_case__ : Union[str, Any] ={}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(a , a ):
for single_url in single_urls:
download_callback(a )
else:
snake_case__ : List[str] =single_urls
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(a , a ):
snake_case__ : int =[os.path.join(a , urllib.parse.quote_plus(Path(a ).name ) ) for x in single_urls]
else:
snake_case__ : Union[str, Any] =single_urls
snake_case__ : Optional[int] =os.path.join(a , urllib.parse.quote_plus(Path(a ).name ) )
snake_case__ : str =value
# make sure that values are unique
if all(isinstance(a , a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[str] ={key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase__ ( self , a , a ):
snake_case__ : Tuple =[]
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : List[Any] =all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , a ) ) for url in data_url )
snake_case__ : str =all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : int =[data_url[0]] * len(a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] =os.path.join(a , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(a )
return dummy_data_list
def lowercase__ ( self , a , a ):
for download_callback in self.download_callbacks:
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : str =os.path.join(a , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase__ ( self ):
pass
def lowercase__ ( self ):
pass
def lowercase__ ( self , a ):
def _iter_archive_members(a ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : int =Path(self.dummy_file ).parent
snake_case__ : int =path.relative_to(a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : List[str] =zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(a )
snake_case__ : List[Any] =Path(a )
snake_case__ : Dict =_iter_archive_members(a ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(a ).as_posix(), file_path.open("""rb""" )
def lowercase__ ( self , a ):
if not isinstance(a , a ):
snake_case__ : List[Any] =[paths]
for path in paths:
if os.path.isfile(a ):
if os.path.basename(a ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(a ):
if os.path.basename(a ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(a ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(a , a )
| 713
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A__ ( _a : np.ndarray , _a : Union[int, Iterable[int]] , _a : bool , _a : int ):
'''simple docstring'''
def constraint_to_multiple_of(_a : Union[str, Any] , _a : List[str] , _a : str=0 , _a : Any=None ):
snake_case__ : Any =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
snake_case__ : int =math.floor(val / multiple ) * multiple
if x < min_val:
snake_case__ : Dict =math.ceil(val / multiple ) * multiple
return x
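# e.g. constraint_to_multiple_of(122, 32) rounds to 128; with max_val=120 it floors to 96 instead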
snake_case__ : str =(output_size, output_size) if isinstance(_a , _a ) else output_size
snake_case__ , snake_case__ : Dict =get_image_size(_a )
snake_case__ , snake_case__ : int =output_size
# determine new height and width
snake_case__ : Tuple =output_height / input_height
snake_case__ : Optional[Any] =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
snake_case__ : Optional[int] =scale_width
else:
# fit height
snake_case__ : Any =scale_height
snake_case__ : Optional[int] =constraint_to_multiple_of(scale_height * input_height , multiple=_a )
snake_case__ : str =constraint_to_multiple_of(scale_width * input_width , multiple=_a )
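# e.g. a 480x640 input resized toward 384x384 with keep_aspect_ratio=True and multiple=32 fits the height and yields (384, 512)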
return (new_height, new_width)
class _lowercase ( _A ):
_a : List[Any] = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = False , a = 1 , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ):
super().__init__(**a )
snake_case__ : Any =size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4}
snake_case__ : List[Any] =get_size_dict(a )
snake_case__ : Tuple =do_resize
snake_case__ : Tuple =size
snake_case__ : Any =keep_aspect_ratio
snake_case__ : List[Any] =ensure_multiple_of
snake_case__ : Tuple =resample
snake_case__ : str =do_rescale
snake_case__ : int =rescale_factor
snake_case__ : Tuple =do_normalize
snake_case__ : int =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : Any =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , a , a , a = False , a = 1 , a = PILImageResampling.BICUBIC , a = None , **a , ):
snake_case__ : Tuple =get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
snake_case__ : Optional[int] =get_resize_output_image_size(
a , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def lowercase__ ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def lowercase__ ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def lowercase__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
snake_case__ : Optional[int] =do_resize if do_resize is not None else self.do_resize
snake_case__ : List[Any] =size if size is not None else self.size
snake_case__ : int =get_size_dict(a )
snake_case__ : Optional[int] =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
snake_case__ : int =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
snake_case__ : Optional[Any] =resample if resample is not None else self.resample
snake_case__ : Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : str =rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Tuple =image_mean if image_mean is not None else self.image_mean
snake_case__ : int =image_std if image_std is not None else self.image_std
snake_case__ : int =make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : int =[to_numpy_array(a ) for image in images]
if do_resize:
snake_case__ : List[str] =[self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
snake_case__ : List[Any] =[self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
snake_case__ : str =[self.normalize(image=a , mean=a , std=a ) for image in images]
snake_case__ : List[Any] =[to_channel_dimension_format(a , a ) for image in images]
snake_case__ : Union[str, Any] ={"""pixel_values""": images}
return BatchFeature(data=a , tensor_type=a )
def lowercase__ ( self , a , a = None ):
snake_case__ : Optional[Any] =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(a ):
snake_case__ : Optional[Any] =target_sizes.numpy()
snake_case__ : Optional[Any] =[]
for idx in range(len(a ) ):
snake_case__ : List[str] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=a )
snake_case__ : List[Any] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
snake_case__ : List[str] =logits.argmax(dim=1 )
snake_case__ : Optional[int] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 448
| 0
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE : int = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : tuple , snake_case_ : Path , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Tuple=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , use_external_data_format=snake_case_ , enable_onnx_checker=snake_case_ , opset_version=snake_case_ , )
else:
export(
snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , opset_version=snake_case_ , )
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , snake_case_ : int , snake_case_ : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
_lowerCAmelCase = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = StableDiffusionPipeline.from_pretrained(snake_case_ , torch_dtype=snake_case_ ).to(snake_case_ )
_lowerCAmelCase = Path(snake_case_ )
# TEXT ENCODER
_lowerCAmelCase = pipeline.text_encoder.config.max_position_embeddings
_lowerCAmelCase = pipeline.text_encoder.config.hidden_size
_lowerCAmelCase = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=snake_case_ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=snake_case_ , )
del pipeline.text_encoder
# UNET
_lowerCAmelCase = pipeline.unet.config.in_channels
_lowerCAmelCase = pipeline.unet.config.sample_size
_lowerCAmelCase = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , snake_case_ , snake_case_ , snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ),
torch.randn(2 ).to(device=snake_case_ , dtype=snake_case_ ),
torch.randn(2 , snake_case_ , snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ),
False,
) , output_path=snake_case_ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=snake_case_ , use_external_data_format=snake_case_ , )
_lowerCAmelCase = str(unet_path.absolute().as_posix() )
_lowerCAmelCase = os.path.dirname(snake_case_ )
_lowerCAmelCase = onnx.load(snake_case_ )
# clean up existing tensor files
shutil.rmtree(snake_case_ )
os.mkdir(snake_case_ )
# collate external tensor files into one
onnx.save_model(
snake_case_ , snake_case_ , save_as_external_data=snake_case_ , all_tensors_to_one_file=snake_case_ , location="""weights.pb""" , convert_attribute=snake_case_ , )
del pipeline.unet
# VAE ENCODER
_lowerCAmelCase = pipeline.vae
_lowerCAmelCase = vae_encoder.config.in_channels
_lowerCAmelCase = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_lowerCAmelCase = lambda snake_case_ , snake_case_ : vae_encoder.encode(snake_case_ , snake_case_ )[0].sample()
onnx_export(
snake_case_ , model_args=(
torch.randn(1 , snake_case_ , snake_case_ , snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=snake_case_ , )
# VAE DECODER
_lowerCAmelCase = pipeline.vae
_lowerCAmelCase = vae_decoder.config.latent_channels
_lowerCAmelCase = vae_decoder.config.out_channels
# forward only through the decoder part
_lowerCAmelCase = vae_encoder.decode
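# vae_encoder and vae_decoder both alias pipeline.vae, so reaching the decode method through either name is equivalent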
onnx_export(
snake_case_ , model_args=(
torch.randn(1 , snake_case_ , snake_case_ , snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=snake_case_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_lowerCAmelCase = pipeline.safety_checker
_lowerCAmelCase = safety_checker.config.vision_config.num_channels
_lowerCAmelCase = safety_checker.config.vision_config.image_size
_lowerCAmelCase = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , snake_case_ , snake_case_ , snake_case_ , ).to(device=snake_case_ , dtype=snake_case_ ),
torch.randn(1 , snake_case_ , snake_case_ , snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=snake_case_ , )
del pipeline.safety_checker
_lowerCAmelCase = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
_lowerCAmelCase = pipeline.feature_extractor
else:
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=snake_case_ , feature_extractor=snake_case_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(snake_case_ )
print("""ONNX pipeline saved to""" , snake_case_ )
del pipeline
del onnx_pipeline
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(snake_case_ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 156
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'wav2vec2'
def __init__(self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=320 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=100 , lowerCamelCase=256 , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase="sum" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=(512, 512, 512, 512, 1_500) , lowerCamelCase=(5, 3, 3, 1, 1) , lowerCamelCase=(1, 2, 3, 1, 1) , lowerCamelCase=512 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = feat_extract_norm
_lowerCAmelCase = feat_extract_activation
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = conv_bias
_lowerCAmelCase = num_conv_pos_embeddings
_lowerCAmelCase = num_conv_pos_embedding_groups
_lowerCAmelCase = len(self.conv_dim )
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = feat_proj_dropout
_lowerCAmelCase = final_dropout
_lowerCAmelCase = layerdrop
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = vocab_size
_lowerCAmelCase = do_stable_layer_norm
_lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase = apply_spec_augment
_lowerCAmelCase = mask_time_prob
_lowerCAmelCase = mask_time_length
_lowerCAmelCase = mask_time_min_masks
_lowerCAmelCase = mask_feature_prob
_lowerCAmelCase = mask_feature_length
_lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase = num_codevectors_per_group
_lowerCAmelCase = num_codevector_groups
_lowerCAmelCase = contrastive_logits_temperature
_lowerCAmelCase = feat_quantizer_dropout
_lowerCAmelCase = num_negatives
_lowerCAmelCase = codevector_dim
_lowerCAmelCase = proj_codevector_dim
_lowerCAmelCase = diversity_loss_weight
# ctc loss
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# adapter
_lowerCAmelCase = add_adapter
_lowerCAmelCase = adapter_kernel_size
_lowerCAmelCase = adapter_stride
_lowerCAmelCase = num_adapter_layers
_lowerCAmelCase = output_hidden_size or hidden_size
_lowerCAmelCase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = xvector_output_dim
@property
def A__ (self ):
'''simple docstring'''
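# total downsampling factor of the feature encoder: the product of the conv strides, e.g. 5*2*2*2*2*2*2 = 320 for the defaults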
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 156
| 1
|
# Lint as: python3
import itertools
import os
import re
_SCREAMING_SNAKE_CASE = re.compile(R"([A-Z]+)([A-Z][a-z])")
_SCREAMING_SNAKE_CASE = re.compile(R"([a-z\d])([A-Z])")
_SCREAMING_SNAKE_CASE = re.compile(R"(?<!_)_(?!_)")
_SCREAMING_SNAKE_CASE = re.compile(R"(_{2,})")
_SCREAMING_SNAKE_CASE = R"^\w+(\.\w+)*$"
_SCREAMING_SNAKE_CASE = R"<>:/\|?*"
def _snake_case (_snake_case : int) -> List[Any]:
_lowercase = _uppercase_uppercase_re.sub(R'\1_\2' , _snake_case)
_lowercase = _lowercase_uppercase_re.sub(R'\1_\2' , _snake_case)
return name.lower()
def _snake_case (_snake_case : Optional[Any]) -> Dict:
_lowercase = _single_underscore_re.split(_snake_case)
    _lowercase = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_snake_case) if n != '')
def _snake_case (_snake_case : List[Any]) -> Union[str, Any]:
if os.path.basename(_snake_case) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
return camelcase_to_snakecase(_snake_case)
def _snake_case (_snake_case : Any , _snake_case : Dict) -> Union[str, Any]:
if os.path.basename(_snake_case) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
if not re.match(_split_re , _snake_case):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
return f'''{filename_prefix_for_name(_snake_case)}-{split}'''
def _snake_case (_snake_case : Optional[int] , _snake_case : Dict , _snake_case : int , _snake_case : str=None) -> Optional[int]:
_lowercase = filename_prefix_for_split(_snake_case , _snake_case)
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
_lowercase = os.path.join(_snake_case , _snake_case)
return f'''{filepath}*'''
def _snake_case (_snake_case : List[Any] , _snake_case : str , _snake_case : Tuple , _snake_case : str=None , _snake_case : int=None) -> Union[str, Any]:
_lowercase = filename_prefix_for_split(_snake_case , _snake_case)
_lowercase = os.path.join(_snake_case , _snake_case)
if shard_lengths:
_lowercase = len(_snake_case)
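        # e.g. a 'train' prefix with 3 shards yields prefix-00000-of-00003 through prefix-00002-of-00003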
_lowercase = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_snake_case)]
if filetype_suffix:
_lowercase = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
_lowercase = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
| 705
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =torch.nn.Linear(10, 10)
_lowercase =torch.optim.SGD(model.parameters(), 0.1)
_lowercase =Accelerator()
_lowercase =accelerator.prepare(snake_case)
try:
pickle.loads(pickle.dumps(snake_case))
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''')
AcceleratorState._reset_state()
| 557
| 0
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a=1_024 , _a=1_024 , _a=3.6 ):
__a = tokenizer
__a = tokenizer.bos_token_id
__a = dataset
__a = seq_length
__a = seq_length * chars_per_token * num_of_sequences
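# character budget per buffer fill: enough raw text to tokenize into roughly num_of_sequences sequences of seq_length tokens at ~3.6 chars per token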
def __iter__( self ):
__a = iter(self.dataset )
__a = True
while more_examples:
__a , __a = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_a )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
__a = False
break
__a = tokenizer(_a , truncation=_a )["input_ids"]
__a = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_a ) , self.seq_length ):
__a = all_token_ids[i : i + self.seq_length]
if len(_a ) == self.seq_length:
yield torch.tensor(_a )
def lowercase ( lowerCAmelCase__ : str ) -> Dict:
__a = {"streaming": True}
__a = load_dataset(args.dataset_name , split='''train''' , **_UpperCamelCase )
__a = ConstantLengthDataset(_UpperCamelCase , _UpperCamelCase , seq_length=args.seq_length )
__a = DataLoader(_UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def lowercase ( lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
model.eval()
__a = []
for step, batch in enumerate(_UpperCamelCase ):
with torch.no_grad():
__a = model(_UpperCamelCase , labels=_UpperCamelCase )
__a = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__a = torch.mean(torch.cat(_UpperCamelCase ) )
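# perplexity is exp(mean loss); torch.exp overflows to inf for very large losses, hence the guard below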
try:
__a = torch.exp(_UpperCamelCase )
except OverflowError:
__a = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowercase_ = Accelerator()
# Parse configuration
lowercase_ = HfArgumentParser(EvaluationArguments)
lowercase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowercase_ = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
lowercase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowercase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowercase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowercase_ , lowercase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
lowercase_ , lowercase_ = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 695
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase_ : str = bs[:]
lowercase_ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowercase_ : Optional[int] = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = set()
lowercase_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : Dict = char
return pairs
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = VOCAB_FILES_NAMES
__lowerCamelCase: int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: str = ['input_ids', 'attention_mask']
def __init__( self : Tuple , a : Tuple , a : Tuple , a : int="replace" , a : Optional[int]="<s>" , a : Tuple="</s>" , a : Tuple="</s>" , a : Tuple="<s>" , a : Optional[Any]="<unk>" , a : Dict="<pad>" , a : List[str]="<mask>" , a : Tuple=False , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowercase_ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
lowercase_ : Any = json.load(a )
lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowercase_ : Dict = errors # how to handle errors in decoding
lowercase_ : Any = bytes_to_unicode()
lowercase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split("\n" )[1:-1]
lowercase_ : Any = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : List[str] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Optional[Any] = {}
lowercase_ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : Any , a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase_ : Optional[Any] = tuple(a )
lowercase_ : Tuple = get_pairs(a )
if not pairs:
return token
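# greedily merge the lowest-ranked adjacent pair (the earliest learned merge) until no known merge applies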
while True:
lowercase_ : Any = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : Dict = bigram
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = 0
while i < len(a ):
try:
lowercase_ : Union[str, Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : Any = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Any = tuple(a )
lowercase_ : List[str] = new_word
if len(a ) == 1:
break
else:
lowercase_ : Union[str, Any] = get_pairs(a )
lowercase_ : List[str] = " ".join(a )
lowercase_ : Optional[int] = word
return word
def lowerCAmelCase__ ( self : Any , a : str ):
'''simple docstring'''
lowercase_ : Dict = []
for token in re.findall(self.pat , a ):
lowercase_ : Tuple = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
return bpe_tokens
def lowerCAmelCase__ ( self : Tuple , a : Dict ):
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : Tuple , a : str ):
'''simple docstring'''
return self.decoder.get(a )
def lowerCAmelCase__ ( self : int , a : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = "".join(a )
lowercase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase__ ( self : List[str] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase_ : Any = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : Optional[int] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
lowercase_ : Dict = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowercase_ : Optional[Any] = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
lowercase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : int , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def lowerCAmelCase__ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : Union[str, Any] = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : str , a : Any , a : int=False , **a : List[Any] ):
'''simple docstring'''
lowercase_ : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
lowercase_ : str = " " + text
return (text, kwargs)
| 620
| 0
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=24 , lowerCamelCase=2 , lowerCamelCase=6 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=1000 , ) ->Optional[int]:
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = scope
__a = range_bbox
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
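# swap coordinates where needed so that x0 <= x1 and y0 <= y1 for every box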
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a = bbox[i, j, 3]
__a = bbox[i, j, 1]
__a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__a = bbox[i, j, 2]
__a = bbox[i, j, 0]
__a = t
__a = None
if self.use_input_mask:
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->Optional[Any]:
'''simple docstring'''
__a = LiltModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase , bbox=lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase , bbox=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->str:
'''simple docstring'''
__a = self.num_labels
__a = LiltForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->Tuple:
'''simple docstring'''
__a = LiltForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
( __a , __a , __a , __a , __a , __a , __a ) = config_and_inputs
__a = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__a =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a =(
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a =False
__a =False
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Tuple:
'''simple docstring'''
return True
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = LiltModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
@slow
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = LiltModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowerCamelCase )
__a = torch.tensor([[1, 2]] , device=lowerCamelCase )
__a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(input_ids=lowerCamelCase , bbox=lowerCamelCase )
__a = torch.Size([1, 2, 768] )
__a = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowerCamelCase , )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase , atol=1e-3 ) )
| 270
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a ="vivit"
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ) ->Tuple:
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
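# Worked example (added for illustration): with the defaults above, a 32-frame 224x224 clip
# tiled by [2, 16, 16] tubelets yields (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3136
# patch tokens per video, which is the sequence length the ViViT encoder sees.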
| 270
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer
_lowerCamelCase: List[Any] = False
    def setUp( self : Optional[Any] ) -> Tuple:
super().setUp()
A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
    def get_tokenizer( self : List[Any] ,**kwargs : Union[str, Any] ) -> Optional[int]:
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self : Optional[Any] ,tokenizer : Tuple ) -> List[Any]:
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer( self : List[Any] ) -> List[str]:
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def test_special_tokens_small_tok( self : Optional[Any] ) -> Tuple:
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] ,padding=False ,truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self : Dict ) -> int:
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']
        assert encoded[-1] == encoded_dot[0]
| 91
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = BioGptTokenizer
_lowerCamelCase: Tuple = False
    def setUp( self : List[str] ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(A_ ) )
    def get_input_output_texts( self : Dict ,tokenizer : Tuple ) -> int:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self : List[Any] ) -> Any:
        tokenizer = BioGptTokenizer(self.vocab_file ,self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@slow
    def test_sequence_builders( self : Union[str, Any] ) -> Optional[Any]:
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 91
| 1
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a_ : Tuple = """scheduler_config.json"""
class snake_case ( __lowercase ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class snake_case ( __lowercase ):
"""simple docstring"""
_lowerCamelCase = 42
class snake_case :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['dtype']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , "create_state" ) and getattr(scheduler , "has_state" , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
    def compatibles( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
    def _get_compatibles( cls ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split("." )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left( x , shape ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class snake_case :
"""simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create( cls , scheduler ):
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state , original_samples , noise , timesteps ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples

def get_velocity_common( state , sample , noise , timesteps ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
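# Hedged sanity check (added; not part of the original module). The bounds below follow
# directly from the min(..., max_beta) clamp and the decreasing cosine alpha_bar above.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(1000 )
    assert _betas.shape == (1000,)
    assert float(_betas.min() ) > 0.0 and float(_betas.max() ) <= 0.999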
| 714
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        """simple docstring"""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
| 445
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model

def convert_diarization( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model

def convert_xvector( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
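# Example invocation (illustrative; the script name and all paths are placeholders):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model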
| 452
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] ="transfo-xl"
_UpperCAmelCase : str =["mems"]
_UpperCAmelCase : Optional[int] ={
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.0_1 , proj_init_std=0.0_1 , init_std=0.0_2 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self : str ):
# Message copied from Transformer-XL documentation
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : Tuple , value : Dict ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 452
| 1
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [0, 2, 4, 6, 8]
_SCREAMING_SNAKE_CASE = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length : int , remainder : int , digits : list[int] , length : int ):
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def solution(max_power : int = 9 ):
    '''simple docstring'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
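# Worked example: 36 + 63 = 99 and both digits of 99 are odd, so 36 is reversible;
# the problem statement (Project Euler 145) gives 120 reversible numbers below
# one-thousand, i.e. solution(3) == 120.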
if __name__ == "__main__":
print(f'''{solution() = }''')
| 489
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_SCREAMING_SNAKE_CASE = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
__lowerCamelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCamelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowerCamelCase : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowerCamelCase : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _snake_case ( self ) -> str:
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
_lowerCAmelCase = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
_lowerCAmelCase = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
_lowerCAmelCase = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
_lowerCAmelCase = text_classifier("This is great !" , return_all_scores=_lowerCAmelCase )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
_lowerCAmelCase = text_classifier("This is great !" , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
_lowerCAmelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
_lowerCAmelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def _snake_case ( self ) -> Tuple:
import torch
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def _snake_case ( self ) -> Any:
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = pipeline("text-classification" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def _snake_case ( self ) -> Any:
_lowerCAmelCase = pipeline("text-classification" , framework="tf" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
    def get_test_pipeline( self , model , tokenizer , processor ) -> str:
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test( self , text_classifier , _ ) -> Union[str, Any]:
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}] )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}, {"label": ANY(str ), "score": ANY(float )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"label": ANY(str ), "score": ANY(float )}] * N, [{"label": ANY(str ), "score": ANY(float )}] * N] , )
        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"label": ANY(str ), "score": ANY(float )} , )
        self.assertTrue(outputs["label"] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
| 489
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ : int = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
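# Worked example for normalize_box (added): the box [10, 20, 30, 40] on a 100x200 image
# maps to [int(1000 * 0.1), int(1000 * 0.1), int(1000 * 0.3), int(1000 * 0.2)] == [100, 100, 300, 200],
# the 0-1000 coordinate scale expected by LayoutLM-style models.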
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = ["pixel_values"]
def __init__( self , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = PILImageResampling.BILINEAR , UpperCamelCase = True , UpperCamelCase = 1 / 255 , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = "" , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCamelCase_ = size if size is not None else {"height": 224, "width": 224}
lowerCamelCase_ = get_size_dict(UpperCamelCase )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_value
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCamelCase_ = apply_ocr
lowerCamelCase_ = ocr_lang
lowerCamelCase_ = tesseract_config
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = PILImageResampling.BILINEAR , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCamelCase_ = (size["height"], size["width"])
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = ChannelDimension.FIRST , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(UpperCamelCase )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCamelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCamelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCamelCase_ = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(UpperCamelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
lowerCamelCase_ = []
lowerCamelCase_ = []
for image in images:
lowerCamelCase_ ,lowerCamelCase_ = apply_tesseract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
words_batch.append(UpperCamelCase )
boxes_batch.append(UpperCamelCase )
if do_resize:
lowerCamelCase_ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
lowerCamelCase_ = BatchFeature(data={"pixel_values": images} , tensor_type=UpperCamelCase )
if apply_ocr:
lowerCamelCase_ = words_batch
lowerCamelCase_ = boxes_batch
return data
| 675
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class snake_case ( lowercase ):
"""simple docstring"""
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
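# Hedged usage sketch (added; assumes a live SparkSession bound to `spark` and that the
# class above is exposed as `SparkDatasetReader`):
# ds = SparkDatasetReader(spark.range(10 ).toDF("id" ) , cache_dir="/tmp/ds_cache" ).read()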
| 675
| 1
|
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ : Union[str, Any] = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta\'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class SCREAMING_SNAKE_CASE__ ( __a ):
"""simple docstring"""
_snake_case = 'facebook/nllb-200-distilled-600M'
_snake_case = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
_snake_case = 'translator'
_snake_case = AutoTokenizer
_snake_case = AutoModelForSeqaSeqLM
_snake_case = LANGUAGE_CODES
_snake_case = ['text', 'text', 'text']
_snake_case = ['text']
    def encode( self , text , src_lang , tgt_lang )-> List[Any]:
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(F"{src_lang} is not a supported language." )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"{tgt_lang} is not a supported language." )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs )-> List[Any]:
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs )-> str:
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
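# Hedged usage sketch (added; the tool is called like any PipelineTool subclass, and
# `TranslationTool` is a hypothetical alias for the class above):
# translator = TranslationTool()
# translator("Bonjour, comment allez-vous ?" , src_lang="French" , tgt_lang="English" )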
| 706
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 451
| 0
|
from __future__ import annotations
def encode( plain ):
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded ):
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main():
    '''simple docstring'''
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
    main()
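# Round-trip example (added): encode maps 'a'..'z' to 1..26 via ord(c) - 96, so
# encode("abc") == [1, 2, 3] and decode(encode(s)) == s for any lowercase alphabetic s.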
| 183
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[int] = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
A__ : List[Any] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
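# Illustrative note (added): _LazyModule defers the torch-dependent imports above, so
# `from transformers.models.clap import ClapConfig` resolves through _import_structure
# and modeling_clap is only loaded on first attribute access.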
| 183
| 1
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __UpperCAmelCase ( lowercase__ ):
'''simple docstring'''
    def _no_encoding_on_file_open( self , _UpperCAmelCase ):
with open(_UpperCAmelCase , encoding='''utf-8''' ) as input_file:
UpperCAmelCase__ : Any = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCAmelCase__ : Union[str, Any] = input_file.read()
UpperCAmelCase__ : str = regexp.search(_UpperCAmelCase )
return match
    def _no_print_statements( self , _UpperCAmelCase ):
with open(_UpperCAmelCase , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_string = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_string )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None
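    # Illustrative inputs for the regex above (assumed examples, not original
    # test data): a bare print("x") is captured by the final group, while
    # commented-out prints and print( occurrences inside string literals match
    # the ignored alternatives and are filtered out by the match.group(1) check.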
def lowerCamelCase ( self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCAmelCase ) ):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )
def lowerCamelCase ( self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCAmelCase ) ):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 701
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def lowerCamelCase ( self ):
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
def lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
def lowerCamelCase ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowerCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 599
| 0
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles ( path : Path , articles : list ):
    content = """\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )
a : Optional[int] = "patrickvonplaten/t5-tiny-random"
a : Tuple = "sshleifer/bart-tiny-random"
a : Optional[int] = "sshleifer/tiny-mbart"
a : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class a ( TestCasePlus ):
"""simple docstring"""
    def run_eval_tester ( self : List[Any] , model : List[Any] ) -> Any:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
__UpperCAmelCase : Dict = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(__lowercase , """argv""" , __lowercase ):
run_generate()
assert Path(__lowercase ).exists()
# os.remove(Path(output_file_name))
    def test_run_eval ( self : Optional[int] ) -> str:
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow ( self : List[str] , model : Optional[Any] ) -> Any:
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search ( self : str , model : List[str] ) -> str:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / """scores.json""" )
        reference_path = str(tmp_dir / """val.target""" )
        _dump_articles(input_file_name , text["""en"""] )
        _dump_articles(reference_path , text["""de"""] )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f"""
run_eval_search.py
{model}
            {str(input_file_name )}
            {str(output_file_name )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(__lowercase , """argv""" , __lowercase ):
with CaptureStdout() as cs:
run_search()
__UpperCAmelCase : List[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
__UpperCAmelCase : Tuple = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
            expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
        assert Path(score_path ).exists()
        os.remove(Path(score_path ) )
| 63
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def UpperCamelCase_ ( self : Dict , model : Union[str, Any] , tokenizer : str , processor : Any ):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def UpperCamelCase_ ( self : str , generator : Union[str, Any] , _A : Optional[int] ):
        outputs = generator('''Something there''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
        outputs = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        outputs = generator(
            ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        with self.assertRaises(ValueError ):
generator(4 )
@require_torch
def UpperCamelCase_ ( self : List[str] ):
        generator = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
        # do_sample=False necessary for reproducibility
        outputs = generator('''Something there''' , do_sample=False )
        self.assertEqual(outputs , [{'''generated_text''': ''''''}] )
        num_return_sequences = 3
        outputs = generator(
            '''Something there''' , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': ''''''},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator('''This is a test''' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'''generated_token_ids''': ANY(torch.Tensor )},
                {'''generated_token_ids''': ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '''<pad>'''
        outputs = generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
        generator = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
        # do_sample=False necessary for reproducibility
        outputs = generator('''Something there''' , do_sample=False )
        self.assertEqual(outputs , [{'''generated_text''': ''''''}] )
| 71
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : Optional[Any] ):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency ( self : Union[str, Any] , comment : Tuple , class_name : Optional[Any] , class_code : Dict , overwrite_result : List[str]=None ):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
def UpperCamelCase_ ( self : int ):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
def UpperCamelCase_ ( self : Optional[Any] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
# Copy consistency with rename
self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
# Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
| 71
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict )-> None:
    '''simple docstring'''
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb )-> nn.Linear:
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None )-> dict:
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''' , F"ffn.experts.expert_{expert_idx}" )
            else:
                key = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" and "experts" not in key:
            key = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" and "experts" not in key:
            key = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        new_dict[key] = state_dict[old_key]
    return new_dict
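# Illustrative mapping performed above (example key, not taken from a real
# checkpoint): with expert_idx=7, a fairseq key such as
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
# is rewritten to
#   "decoder.layers.3.ffn.experts.expert_7.fc1.weight"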
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME )-> int:
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"-rank-{expert}.pt"
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['''model''']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('''.bin''' , F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
lowerCamelCase__ : List[Any] = parser.parse_args()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCamelCase__ : str = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCamelCase__ : List[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 698
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure( config : Dict )-> None:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption( parser : Dict )-> Tuple:
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter : Union[str, Any] )-> List[Any]:
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session : Dict , exitstatus : List[str] )-> Dict:
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
    def check_output( self :Dict , want :int , got :int , optionflags :Optional[Any] ) -> Dict:
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 698
| 1
|
"""simple docstring"""
from __future__ import annotations
def _a ( voltage , current , resistance ):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
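# Usage sketch (illustrative values): pass 0 for exactly one quantity and the
# function solves for it from the other two.
#     _a(voltage=10 , current=2 , resistance=0)  ->  {'resistance': 5.0}
#     _a(voltage=0 , current=2 , resistance=5)   ->  {'voltage': 10.0}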
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
|
"""simple docstring"""
import math
def is_prime ( number ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( ratio = 0.1 ):
    """simple docstring"""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
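# Reasoning sketch (editorial note): this mirrors Project Euler problem 58.
# For a spiral of side length j there are 2 * j - 1 numbers on the diagonals;
# range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ) visits the three new
# non-square corners of each layer, and the loop stops once the prime ratio
# on the diagonals drops below `ratio`.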
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
"""simple docstring"""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                    pa_table = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise
| 381
|
import math
def fx ( x : float , a : float ):
    """simple docstring"""
    return math.pow(x , 2 ) - a
def fx_derivative ( x : float ):
    """simple docstring"""
    return 2 * x
def get_initial_point ( a : float ):
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative ( a : float , max_iter : int = 99_99 , tolerance : float = 0.00_0000_0000_0001 ):
    """simple docstring"""
    if a < 0:
        raise ValueError('''math domain error''' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
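# Usage sketch (expected value under the fixes above): square_root_iterative(4)
# converges to roughly 2.0 via Newton's method on f(x) = x**2 - a, i.e.
# x_{n+1} = x_n - f(x_n) / f'(x_n).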
if __name__ == "__main__":
from doctest import testmod
testmod()
| 381
| 1
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch (xlm_checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path , map_location="""cpu""" )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(config , indent=2 ) + """\n""" )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + """\n""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 529
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders (accelerator : Accelerator , batch_size : int = 16 ) -> Dict:
'''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples : List[Any] ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples : Union[str, Any] ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def training_function (config : int , args : Tuple ) -> int:
    '''simple docstring'''
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config["""lr"""]
lowerCAmelCase = int(config["""num_epochs"""] )
lowerCAmelCase = int(config["""seed"""] )
lowerCAmelCase = int(config["""batch_size"""] )
set_seed(snake_case__ )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(snake_case__ , snake_case__ )
    metric = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
        run = os.path.split(__file__ )[-1].split(""".""" )[0]
        accelerator.init_trackers(run , config )
# Now we train the model
    for epoch in range(num_epochs ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(snake_case__ ),
"""epoch""": epoch,
                } , step=epoch , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main () -> None:
'''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=snake_case__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 529
| 1
|
from __future__ import annotations
def is_9_pandigital ( SCREAMING_SNAKE_CASE ) -> bool:
    _lowercase : str = str(SCREAMING_SNAKE_CASE )
    return len(_lowercase ) == 9 and set(_lowercase ) == set('123456789' )
def solution ( ) -> int | None:
for base_num in range(9_999 , 4_999 , -1 ):
_lowercase : Optional[Any] = 100_002 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE ):
return candidate
for base_num in range(333 , 99 , -1 ):
_lowercase : List[str] = 1_002_003 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE ):
return candidate
return None
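# Why these two loops (editorial note): for a 4-digit base n, 100_002 * n is
# the concatenation of n and 2 * n (Project Euler problem 38); for a 3-digit
# base, 1_002_003 * n concatenates n, 2 * n and 3 * n. Counting down, the
# first 9-pandigital candidate found is the maximum.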
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66
|
'''simple docstring'''
def snake_case_ ( word ):
    """simple docstring"""
    return "".join(chr(ord(char ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 533
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 128
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 128
| 1
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class( config_class: int ) -> Any:
    '''simple docstring'''
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
            ckpt_link = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase = ckpt_name
break
return checkpoint
def check_config_docstrings_have_checkpoints( ) -> List[Any]:
    '''simple docstring'''
    configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint ) )
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 370
|
'''simple docstring'''
from __future__ import annotations
class XORCipher :
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 0 ):
'''simple docstring'''
        self.__key = key
    def encrypt ( self : Any , content : str , key : int ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt ( self : Optional[int] , content : str , key : int ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string ( self : Any , content : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string ( self : Optional[int] , content : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file ( self : Union[str, Any] , file : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
except OSError:
return False
return True
    def decrypt_file ( self : List[Any] , file : str , key : int ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
except OSError:
return False
return True
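# Editorial note: XOR is an involution, so for any key in 1..255,
# decrypt_string(encrypt_string(text , key ) , key ) == text; the commented
# tests below rely on this round-trip property.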
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 582
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def lowerCamelCase_ ( self: Optional[int] , **UpperCamelCase_: Optional[int] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any ) -> Any:
"""simple docstring"""
lowercase__ = '''adapt react readapt apt'''
lowercase__ = '''adapt react readapt apt'''
return input_text, output_text
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ = '''adapt react readapt apt'''
lowercase__ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `lowercase__` above holds the expected encoding, kept verbatim from the source
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
from copy import deepcopy
class BinaryIndexedTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
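# Illustrative usage sketch (added for clarity): exercises the Fenwick-tree
# operations defined above on a small fixed array.
if __name__ == "__main__":
    bit = BinaryIndexedTree([1, 2, 3, 4, 5])
    assert bit.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
    assert bit.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
    bit.add(2, 10)                       # arr[2] += 10
    assert bit.get(2) == 13
    print(bit.get_array())               # [1, 2, 13, 4, 5]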
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
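# Illustrative usage sketch (added for clarity): the classic CLRS price table,
# where a rod of length 8 yields an optimal revenue of 22.
if __name__ == "__main__":
    clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
    print(bottom_up_cut_rod(8, clrs_prices))  # 22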
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
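# Illustrative non-interactive check (added for clarity), complementing the
# stdin-driven demo above.
if __name__ == "__main__":
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]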
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
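# Illustrative usage sketch (added for clarity); assumes `transformers` is
# installed, since the classes above subclass PretrainedConfig.
if __name__ == "__main__":
    text_config = Pix2StructTextConfig(num_layers=2)
    vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    print(config.to_dict()["text_config"]["num_layers"])  # 2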
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
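# Illustrative usage sketch (added for clarity): solve for the missing quantity
# by passing 0 for exactly one argument.
if __name__ == "__main__":
    print(coulombs_law(force=0, charge1=3e-6, charge2=5e-6, distance=0.1))  # solves for force
    print(coulombs_law(force=10, charge1=0, charge2=5e-6, distance=0.1))    # solves for charge1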
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
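# Illustrative usage sketch (added for clarity): EMI for a 25,000 loan at 12%
# annual interest repaid over 2 years (example values only).
if __name__ == "__main__":
    emi = equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=2)
    print(f"Monthly installment: {emi:.2f}")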
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
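# Illustrative usage sketch (added for clarity): solve a small strictly
# diagonally dominant system with a few Jacobi iterations.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))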
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
__magic_name__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
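
# Cross-check sketch (illustrative, not in the original file): the recursion above
# enumerates the same r-element subsets as the standard library's itertools.
if __name__ == "__main__":
    from itertools import combinations

    for combo in combinations([10, 20, 30, 40, 50], 3):
        print(*combo)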
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
__lowerCamelCase = parse(importlib.metadata.version("torch"))
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
__magic_name__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = parse(importlib.metadata.version(__UpperCamelCase ) )
return operation(__UpperCamelCase , parse(__UpperCamelCase ) )
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> List[str]:
return compare_versions(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
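
# Usage sketch (illustrative, not part of the original module): gate code paths
# on the installed torch release, or compare any installed library to a requirement.
if __name__ == "__main__":
    if is_torch_version(">=", "2.0.0"):
        print("torch.compile is available")
    if compare_versions("numpy", ">=", "1.20.0"):
        print("recent numpy installed")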
"""Binarize a grayscale image using the mean of its pixel values as the threshold."""

from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
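
# Equivalent vectorized sketch (illustrative, not in the original file): the same
# global mean threshold computed with NumPy arrays instead of per-pixel Python loops.
if __name__ == "__main__":
    import numpy as np

    arr = np.array(Image.open("path_to_image").convert("L"))
    binary = ((arr > arr.mean()) * 255).astype(np.uint8)
    Image.fromarray(binary).save("output_image_path")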
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LeViT model."""

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
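
# Usage sketch (illustrative, not part of the original module): instantiate the
# config with its defaults and inspect a couple of derived attributes.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.model_type)   # "levit"
    print(config.down_ops[0])  # first "Subsample" stage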
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only the main process writes the file."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function or an object."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge two dictionaries, writing the result into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
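
# Usage sketch (illustrative, not part of the original module): temporarily inject
# environment variables; keys are upper-cased on entry and removed again on exit.
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ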
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
_UpperCamelCase = faiss.index_cpu_to_gpu(__snake_case, 1, __snake_case )
wikiaab_gpu_index_flat.add(__snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__snake_case )
return (elia_train, eli5_train_q_index)
_a , _a , _a = load_indexes()
_a , _a , _a , _a = load_models()
_a , _a = load_train_data()
def lowerCamelCase__ ( __snake_case, __snake_case=10 ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = embed_questions_for_retrieval([question], __snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(__snake_case, __snake_case )
_UpperCamelCase = [elia_train[int(__snake_case )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( __snake_case, __snake_case="wiki40b", __snake_case="dense", __snake_case=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=64, __snake_case=2_56, __snake_case=False, __snake_case=2, __snake_case=0.95, __snake_case=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
__snake_case, __snake_case, __snake_case, num_answers=1, num_beams=__snake_case, min_len=__snake_case, max_len=__snake_case, do_sample=__snake_case, temp=__snake_case, top_p=__snake_case, top_k=__snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
_a = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_a = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
_a = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_a , _a = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_a = find_nearest_training(question)
_a = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_a = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
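
# Usage sketch (illustrative column names, not part of the original module): map
# dataset-specific columns onto the canonical "text"/"summary" schema the template expects.
if __name__ == "__main__":
    task = Summarization(text_column="article", summary_column="highlights")
    print(task.column_mapping)  # {"article": "text", "highlights": "summary"}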
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with latent diffusion:
    a VQ-VAE decoder, a U-Net denoiser and a DDIM scheduler.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
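
# Usage sketch (illustrative, not part of the original module; assumes a compatible
# unconditional LDM checkpoint such as the public "CompVis/ldm-celebahq-256"):
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(batch_size=1, num_inference_steps=50).images[0]
    image.save("ldm_sample.png")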
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
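
# Usage sketch (illustrative values, not part of the original script): the conversion
# can also be driven programmatically by building the same Namespace parse_args() returns.
if __name__ == "__main__":
    from argparse import Namespace

    example_args = Namespace(
        dataset_name="wikitext",
        dataset_config="wikitext-2-raw-v1",
        tokenizer_name_or_path="sayakpaul/unigram-tokenizer-wikitext",
        shard_size=1000,
        split="train",
        limit=100,
        max_length=512,
        output_dir="tf-tpu",
    )
    # main(example_args)  # uncomment to run a small end-to-end conversion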
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowerCAmelCase_: Tuple = (7_2_0, 1_2_8_0) # Height, Width
lowerCAmelCase_: List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCAmelCase_: List[Any] = 1 / 1_0_0
lowerCAmelCase_: List[str] = ""
lowerCAmelCase_: List[str] = ""
lowerCAmelCase_: Optional[int] = ""
lowerCAmelCase_: List[str] = 2_5_0
def __a ( ):
'''simple docstring'''
lowercase__ , lowercase__ = get_dataset(A , A )
for index in range(A ):
lowercase__ = random.sample(range(len(A ) ) , 4 )
lowercase__ , lowercase__ , lowercase__ = update_image_and_anno(
A , A , A , A , A , filter_scale=A , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase__ = random_chars(32 )
lowercase__ = path.split(os.sep )[-1].rsplit("." , 1 )[0]
lowercase__ = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase__ = []
for anno in new_annos:
lowercase__ = anno[3] - anno[1]
lowercase__ = anno[4] - anno[2]
lowercase__ = anno[1] + width / 2
lowercase__ = anno[2] + height / 2
lowercase__ = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(A )
with open(f'''{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
for label_file in glob.glob(os.path.join(A , "*.txt" ) ):
lowercase__ = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(A ) as in_file:
lowercase__ = in_file.readlines()
lowercase__ = os.path.join(A , f'''{label_name}.jpg''' )
lowercase__ = []
for obj_list in obj_lists:
lowercase__ = obj_list.rstrip("\n" ).split(" " )
lowercase__ = float(obj[1] ) - float(obj[3] ) / 2
lowercase__ = float(obj[2] ) - float(obj[4] ) / 2
lowercase__ = float(obj[1] ) + float(obj[3] ) / 2
lowercase__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A )
labels.append(A )
return img_paths, labels
def __a ( A , A , A , A , A , A = 0.0 , ):
'''simple docstring'''
lowercase__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase__ = int(scale_x * output_size[1] )
lowercase__ = int(scale_y * output_size[0] )
lowercase__ = []
lowercase__ = []
for i, index in enumerate(A ):
lowercase__ = all_img_list[index]
path_list.append(A )
lowercase__ = all_annos[index]
lowercase__ = cva.imread(A )
if i == 0: # top-left
lowercase__ = cva.resize(A , (divid_point_x, divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = bbox[1] * scale_x
lowercase__ = bbox[2] * scale_y
lowercase__ = bbox[3] * scale_x
lowercase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase__ = cva.resize(A , (output_size[1] - divid_point_x, divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = scale_x + bbox[1] * (1 - scale_x)
lowercase__ = bbox[2] * scale_y
lowercase__ = scale_x + bbox[3] * (1 - scale_x)
lowercase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase__ = cva.resize(A , (divid_point_x, output_size[0] - divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = bbox[1] * scale_x
lowercase__ = scale_y + bbox[2] * (1 - scale_y)
lowercase__ = bbox[3] * scale_x
lowercase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase__ = cva.resize(
A , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = scale_x + bbox[1] * (1 - scale_x)
lowercase__ = scale_y + bbox[2] * (1 - scale_y)
lowercase__ = scale_x + bbox[3] * (1 - scale_x)
lowercase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowercase__ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __a ( A ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
lowercase__ = ascii_lowercase + digits
return "".join(random.choice(A ) for _ in range(A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
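
# Usage sketch (illustrative file name): this reader backs the public API, e.g.
# `load_dataset("text", ...)`, which yields one example per line of the input file.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("text", data_files="corpus.txt", split="train")
    print(dataset[0]["text"])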
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized infix expression with
    Dijkstra's two-stack algorithm.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack = Stack()
    operator_stack: Stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
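
# Usage sketch (downloads the public "roberta-base" checkpoint):
if __name__ == "__main__":
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    print(tokenizer("Hello world")["input_ids"])  # [0, 31414, 232, 2]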
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class BatchSamplerShardTest(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : int = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : int = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Any = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__A,__A )
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : int = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Any = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : List[Any] = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : int = [[], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : str = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : List[str] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
    def test_batch_sampler_shards_with_splits_no_even_batches(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        # If drop_last is False, we may need to loop back to the start of the reference to match the
        # padded-out samples at the end of the shards.
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
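
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: the round-robin
# scheme the shard tests above exercise can be reproduced in plain Python.
# `shard_batches` is a hypothetical helper, shown only to make the expected
# values easier to follow (shard i gets batches i, i + n, i + 2n, ...).
def shard_batches(batches, num_processes, process_index):
    return batches[process_index::num_processes]

assert shard_batches([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 2, 0) == [[0, 1, 2], [6, 7, 8]]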
| 11
|
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    """Compare two elements and swap them if they don't match `direction`
    (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic slice array[low : low + length] into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in the given direction.
    Note: `length` must be a power of two for the merge network to work."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
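
# Illustrative sketch (not part of the original module): bitonic sort only
# works when the slice length is a power of two. `bitonic_sort_any_length` is
# a hypothetical wrapper that pads with the maximum element, sorts, and then
# strips the padding again.
def bitonic_sort_any_length(data: list[int]) -> list[int]:
    if not data:
        return []
    padded = list(data)
    while len(padded) & (len(padded) - 1):  # not a power of two yet
        padded.append(max(data))
    bitonic_sort(padded, 0, len(padded), 1)
    return padded[: len(data)]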
| 11
| 1
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1.
    >>> set_bit(0b1101, 1)
    15
    """
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0.
    >>> clear_bit(0b1111, 1)
    13
    """
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
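
# Illustrative usage sketch (not part of the original module): packing three
# boolean flags into a single integer with the helpers above. `pack_flags` is
# a hypothetical name used only for this demonstration.
def pack_flags(read: bool, write: bool, execute: bool) -> int:
    flags = 0
    for position, enabled in enumerate((read, write, execute)):
        if enabled:
            flags = set_bit(flags, position)
    return flags

assert pack_flags(True, False, True) == 0b101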
| 77
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
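
# Example invocation (illustrative; every path below is a placeholder):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output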
| 77
| 1
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 522
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Light wrapper around a DeepSpeed config dict/file/base64 string with convenience accessors."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Thin wrapper whose `backward` also runs the DeepSpeed engine step."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped (because of gradient overflow)."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the real optimizer comes from the DeepSpeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the real scheduler comes from the DeepSpeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
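

# Illustrative usage sketch (not part of the original module): querying a
# DeepSpeed config with HfDeepSpeedConfig. The dictionary below is a made-up
# minimal config, not an official recommendation.
if __name__ == "__main__":
    ds_config = HfDeepSpeedConfig(
        {
            "zero_optimization": {
                "stage": 3,
                "offload_param": {"device": "cpu"},
            }
        }
    )
    print(ds_config.get_value("zero_optimization.stage"))  # 3
    print(ds_config.is_zero3())  # True
    print(ds_config.is_offload())  # True, because params offload to CPU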
| 522
| 1
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
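

# Illustrative usage sketch (not part of the original file): with the default
# feature-extractor strides, each logit covers 5 * 2**6 = 320 raw audio samples.
if __name__ == "__main__":
    config = Data2VecAudioConfig()
    print(config.inputs_to_logits_ratio)  # 320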
| 72
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
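

# Illustrative usage sketch (not part of the original file): the down_ops
# derived in __init__ describe the subsampling between the three stages.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.down_ops)  # [['Subsample', 16, 8, 4, 2, 2], ['Subsample', 16, 16, 4, 2, 2]]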
| 246
| 0
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
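

# Illustrative usage sketch (not part of the original module): with no backend
# registered, `parallel_map` falls back to a multiprocessing Pool. `_demo_worker`
# is a hypothetical `single_map_nested_func`-style callable that unpacks one
# (function, shard, types, index, disable_tqdm, desc) tuple and maps the
# function over its shard.
def _square(x):
    return x * x

def _demo_worker(args):
    function, shard, _types, _index, _disable_tqdm, _desc = args
    return [function(item) for item in shard]

if __name__ == "__main__":
    result = parallel_map(_square, list(range(10)), 2, None, True, None, _demo_worker)
    print(result)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]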
| 454
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
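

# Illustrative usage sketch (not part of the original module); downloading the
# pretrained tokenizer requires network access.
if __name__ == "__main__":
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    encoded = tokenizer("Hello world", return_tensors="pt")
    print(encoded["input_ids"])  # ids for <s> Hello world </s>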
| 454
| 1
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 507
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
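

# Illustrative usage sketch (not part of the original module).
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        print(os.environ["MASTER_PORT"])  # "29501", only inside the context
    print(merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}))  # {'a': {'c': 2, 'b': 1}}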
| 507
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
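
# Example invocation (illustrative; the output path is a placeholder):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path /path/to/deit-base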
| 286
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
| 286
| 1
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the binary (Russian-peasant) method: O(log b) additions."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Same idea, but every addition is reduced modulo `modulus` to keep values small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
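# Quick self-check for the two helpers above (function and parameter names were
# reconstructed in this edit; the expected values are plain arithmetic):
if __name__ == "__main__":
    assert binary_multiply(2, 3) == 6
    assert binary_mod_multiply(5, 7, 4) == 3  # (5 * 7) % 4 == 3
    print("binary multiplication self-checks passed")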
| 74
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class holding a conversation and its history: the unprocessed user
    input, past user inputs, and the model's generated responses.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
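# Minimal usage sketch (the checkpoint name is only an example; outputs depend
# on the model):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])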
| 3
| 0
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current weather for a location; the parameter names double as the query string."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Five-day forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call weather data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
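# Response-handling sketch (requires a valid APPID above; the fields shown
# follow OpenWeatherMap's documented JSON schema):
#
#   weather = current_weather("Amsterdam")
#   print(weather["main"]["temp"], weather["weather"][0]["description"])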
| 716
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    Build a Gabor kernel: a 2-D Gaussian envelope modulated by a sinusoid
    oriented at `theta` degrees with wavelength `lambd`.
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
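# Parameter order for gabor_filter_kernel is (ksize, sigma, theta, lambd, gamma, psi):
# kernel size, Gaussian envelope width, orientation in degrees, sinusoid wavelength,
# spatial aspect ratio, and phase offset. For instance, a 45-degree kernel:
#
#   kernel_45 = gabor_filter_kernel(10, 8, 45, 10, 0, 0)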
| 53
| 0
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n >= 0."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
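# The closed form n * (2n - 1) yields 0, 1, 6, 15, 28, ... for n = 0..4, so:
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]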
| 345
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 416
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum, per resource, what all processes currently hold."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: the claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm until no process can safely run."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
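# Usage sketch with the module's sample data above (any truthy keyword argument,
# e.g. describe=True, also prints the allocation tables before the run):
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)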
| 701
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 169
| 0
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
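# Note on the kernel reshaping above: Flax stores convolution kernels as
# (H, W, in_channels, out_channels) and dense kernels as (in, out), while
# PyTorch expects (out_channels, in_channels, H, W) and (out, in). That is why
# 4-D kernels go through jnp.transpose(kernel, (3, 2, 0, 1)) and 2-D ones
# through a plain transpose (.T) before torch.from_numpy().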
| 283
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 319
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 716
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
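# Minimal usage sketch (assumes a transformers version that ships RoCBert):
#
#   from transformers import RoCBertConfig, RoCBertModel
#   config = RoCBertConfig(enable_shape=True, enable_pronunciation=True)
#   model = RoCBertModel(config)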
| 104
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 49
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , '''README.md''' )
print(F"Generating {path}" )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(_lowerCAmelCase )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 234
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
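# Sketch of using the scheduler outside the tests (the checkpoint name is only
# an example; any diffusers pipeline exposing `scheduler.config` works the same):
#
#   from diffusers import PNDMScheduler, StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)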
| 700
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 35
| 0
|
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm; gcd(0, b) = |b|."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Calculate the GCD of two integers read from stdin."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
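# For reference, both helpers agree on every input pair, e.g.
# greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40) == 8:
# the first recurses on (b % a, a), the second runs Euclid's iterative loop.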
| 529
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
lowerCAmelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCAmelCase__ = image_processor(
snake_case__ , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCAmelCase__ = image_processor(
snake_case__ , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = PixaStructImageProcessingTester(self , num_channels=4 )
lowerCAmelCase__ = 3
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case__ , """do_convert_rgb""" ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCAmelCase__ = image_processor(
snake_case__ , return_tensors="""pt""" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
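
# Note (illustrative, not part of the original file): Pix2Struct flattens each
# image into `max_patches` rows of length patch_h * patch_w * num_channels + 2;
# the two extra values per row encode the patch's row and column position, which
# is why `expected_hidden_dim` above is computed as (patch_h * patch_w * C) + 2.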
| 644
| 0
|
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost to travel on every day listed in `days`, where
    costs = [price of 1-day pass, price of 7-day pass, price of 30-day pass].
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
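    # Illustrative check (not part of the original file): with travel days
    # [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] (1-, 7- and 30-day passes),
    # the cheapest cover is one 7-day pass for days 1-7 plus 1-day passes for
    # days 8 and 20, i.e. 7 + 2 + 2 = 11.
    assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11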
| 711
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    r"""Class to store the configuration of the BertAbs model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
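

# Minimal usage sketch (illustrative, not part of the original file):
#   config = BertAbsConfig(enc_layers=6, dec_layers=6)
#   assert config.model_type == "bertabs"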
| 479
| 0
|
'''Patience-based Early Exit (PABEE) variants of the BERT model.'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
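

# Usage sketch (illustrative, not part of the original file): PABEE runs a
# classifier head after every layer and, at inference, stops as soon as
# `patience` consecutive heads agree on the prediction.
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#   model.bert.set_patience(3)
#   model.eval()  # the early-exit branch is only taken outside training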
| 107
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict keys to HF conventions and split it into
    the decoder (LM) weights and the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
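
    # Example invocation (illustrative; the script file name is assumed):
    #   python convert_musicgen_transformers.py --checkpoint small \
    #       --pytorch_dump_folder ./musicgen-small --device cpu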
| 269
| 0
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
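

# Example (illustrative, not part of the original file): with this hubconf on
# the repository's default branch, the entry points above are consumed through
# torch.hub, e.g.
#   model = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")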
| 709
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
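

# Note (illustrative): `accelerate config` calls get_sagemaker_input() when the
# Amazon SageMaker compute environment is selected and serializes the returned
# SageMakerConfig to the default YAML config file read later by `accelerate launch`.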
| 447
| 0
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
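
# Equivalent formula (illustrative comment, not part of the original file):
#   S_W = (1 / N) * sum_i Xc_i @ Xc_i.T
# where Xc_i is the mean-centered data of class i and N = features.shape[1] is
# the total number of samples.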
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` linear discriminant directions."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
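
    # Illustrative smoke test (not part of the original file): project a tiny
    # 3-feature, 4-sample dataset onto its first 2 principal components.
    demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
    print(principal_component_analysis(demo_features, 2).shape)  # (2, 4)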
| 218
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 218
| 1
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain the same value, because if it does it means there is a vertical
        # collision. Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
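
    # For n = 4 the two solutions printed above are:
    #   . Q . .      . . Q .
    #   . . . Q      Q . . .
    #   Q . . .      . . . Q
    #   . . Q .      . Q . .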
| 467
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
print(F'Generating {fsmt_model_config_file}' )
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
# tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
# model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
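
    # Example invocation (illustrative paths):
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fsmt_checkpoint_path data/wmt19-ru-en/model.pt \
    #       --pytorch_dump_folder_path data/wmt19-ru-en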
| 467
| 1
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
print(sorted_list)
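
    # Illustrative check (not part of the original file): odd-even transposition
    # sort is a parallel-friendly variant of bubble sort, O(n^2) sequentially.
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]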
| 509
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
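# Each tuple above is later applied by `rename_key`; for example the checkpoint key
# "transformer.encoder.layers.0.norm1.weight" is rewritten to
# "encoder.layers.0.self_attn_layer_norm.weight" in the HF state dict.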
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
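# Note: the fused `in_proj_weight` popped above has shape (3 * 256, 256), since the
# model dimension of Conditional DETR is 256; the three 256-row slices recover the
# separate query, key and value projections, in that order.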
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
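# A minimal sketch of an end-to-end invocation; the script filename, checkpoint name
# and output folder below are illustrative, not taken from the script itself:
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50
#
# after which the converted weights can be reloaded with
#
#   from transformers import ConditionalDetrForObjectDetection
#   model = ConditionalDetrForObjectDetection.from_pretrained("./conditional_detr_resnet50")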
| 509
| 1
|
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c over Pythagorean triplets (a, b, c)
    satisfying a + b + c == n, or -1 if no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
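# Quick sanity check (added for illustration, not part of the original solution):
# for a perimeter of 12 the only Pythagorean triplet is (3, 4, 5), so
# solution(12) returns 3 * 4 * 5 == 60.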
| 708
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
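# A minimal sketch of the helpers exercised above, runnable outside the test
# harness (the model class is just an example):
#
#   from transformers import BertForSequenceClassification
#   from transformers.utils import ContextManagers, find_labels
#
#   with ContextManagers([context_fr(), context_en()]):
#       print("hello")   # prints: Bonjour! / Welcome! / hello / Bye! / Au revoir!
#
#   find_labels(BertForSequenceClassification)   # -> ["labels"]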
| 106
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase_ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, sorted by descending relevance of their passage.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Find the best answer spans from the given start/end logits, keeping only non-overlapping spans
        that are no longer than `max_answer_length`.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRReaderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles
    and texts that are combined to be fed to the [`DPRReader`] model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
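# A minimal sketch of how the reader tokenizer is used together with DPRReader;
# the question/passage strings are illustrative:
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by the artist Haddaway",
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)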
| 11
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
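# Sketch of a typical invocation; the script filename and the checkpoint/config
# paths below are illustrative, not taken from the script itself:
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --no-push_to_hub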
| 381
| 0
|
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Return the arithmetic mean of the numbers in ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
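# This test is gated behind transformers' slow-test marker; a sketch of running it
# in isolation (the test file path assumes a transformers checkout):
#
#   RUN_SLOW=1 pytest tests/trainer/test_trainer_seq2seq.py -k test_finetune_bert2bert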
| 80
| 1
|