code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_a = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many rows of `out` are classified correctly.

    Renamed from the obfuscated `lowerCAmelCase__` to match the call site in
    `main` (eval loop). Fixes the duplicated parameter names (a SyntaxError)
    and the undefined local `outputs`.

    Args:
        out: 2-D array of per-class scores, shape (n_samples, n_classes).
        labels: 1-D array of gold class indices, shape (n_samples,).

    Returns:
        Number of samples whose argmax prediction equals the label (numpy int).
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Load a RocStories CSV into a list of (story, cont1, cont2, label) tuples.

    Renamed from the obfuscated `lowerCAmelCase__` to match the call sites in
    `main`; restores the undefined locals (`output`, the csv reader).

    Args:
        dataset_path: path to the RocStories cloze-test CSV file.

    Returns:
        List of tuples: (story text joined from columns 1-4, first
        continuation, second continuation, 0-based correct-answer index).
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the CSV header row
        for line in tqdm(reader):
            # columns 1..4 are the four story sentences; last column is the
            # 1-based answer index, converted here to 0-based
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Build tensors for the double-heads model from encoded RocStories.

    Renamed from the obfuscated `lowerCAmelCase__` to match the call site in
    `main`; restores all array locals (they were assigned to the same
    throwaway name) and fixes the non-existent dtype `np.intaa` -> np.int64.

    For each (story, cont1, cont2, label) example, builds the two candidate
    inputs:  [start] story [delim] cont [clf]

    Args:
        encoded_datasets: iterable of datasets; each dataset is a list of
            (story_ids, cont1_ids, cont2_ids, mc_label) tuples.
        input_len: padded sequence length of the output tensors.
        cap_length: max tokens kept from the story and each continuation.
        start_token, delimiter_token, clf_token: special-token ids.

    Returns:
        List (one per dataset) of tuples
        (input_ids, mc_token_ids, lm_labels, mc_labels) as torch tensors.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore-index for the LM loss on padding positions
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # the classification head reads the hidden state at the [clf] token
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """Fine-tune an OpenAI GPT double-heads model on the RocStories dataset.

    Renamed from the obfuscated `lowerCAmelCase__` to match the call at the
    bottom of the file. Every local that had been clobbered into the single
    throwaway name `lowerCamelCase__` is restored from its use sites, so the
    script actually runs. Requires `transformers` and `tqdm` imported at the
    top of the file; `logger` is the module-level logger (assigned to `_a`
    at the top of the file — presumably it should be named `logger`; verify).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed all RNGs for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model.
    # This loading function also adds new tokens and embeddings called `special tokens`.
    # These new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings, pass ints through, map over lists."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    # +3 accounts for the [start], [delimiter] and [classify] special tokens
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare input tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        # Parameters that should not be decayed (biases and LayerNorm weights)
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Weighted sum of the LM loss and the multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
# Script entry point: run the RocStories fine-tuning defined above.
# NOTE(review): `main` is not defined under that name here (the preceding def
# is obfuscated to `lowerCAmelCase__`) — confirm the intended target.
if __name__ == "__main__":
    main()
| 481
|
def merge_sort(collection) -> list:
    """Sort `collection` with a pure top-down merge sort and return it.

    Renamed from the obfuscated `lowerCAmelCase__` so the recursive calls to
    `merge_sort` (already present in the body) and the call in the
    `__main__` block resolve; also restores the undefined local `mid`.

    Args:
        collection: a mutable ordered sequence of comparable items.

    Returns:
        The sorted list (base-case inputs of length <= 1 are returned as-is).
    """

    def merge(left, right) -> list:
        """Merge two sorted lists into one sorted list (consumes both)."""

        def _merge():
            while left and right:
                # pop the smaller head; `<=` keeps the sort stable
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
# Interactive demo: read comma-separated integers from stdin and print them
# sorted. Restores the undefined locals `user_input` / `unsorted` (both had
# been clobbered into the throwaway name `_a` by the obfuscation).
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 481
| 1
|
'''simple docstring'''
from typing import List
import numpy as np
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase ={key: len(_lowerCAmelCase ) for key, value in gen_kwargs.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowercase =max(lists_lengths.values() , default=0 )
return max(1 , _lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
for group_idx in range(_lowerCAmelCase ):
__lowercase =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase =range(_lowerCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(_lowerCAmelCase )
return shards_indices_per_group
def _A(gen_kwargs, max_num_jobs):
    """Split `gen_kwargs` into up to `max_num_jobs` kwargs dicts, sharding
    every list value and copying every other value unchanged.

    Fixes the duplicated parameter names and restores the undefined locals
    (`num_shards`, `shard_indices_per_group`). Calls the sibling helpers
    `_number_of_shards_in_gen_kwargs` and `_distribute_shards` exactly as the
    original body already did — NOTE(review): those names are obfuscated to
    `_A` in this file as well; confirm they resolve after de-obfuscation.

    Args:
        gen_kwargs: generator kwargs; list values are shardable sources.
        max_num_jobs: maximum number of jobs (groups) to split into.

    Returns:
        List of kwargs dicts, one per group.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _A ( _lowerCAmelCase ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _lowerCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase ={len(_lowerCAmelCase ) for value in gen_kwargs.values() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
__lowercase ={}
for size in list_sizes:
__lowercase =list(range(_lowerCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase =dict(_lowerCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase =[value[i] for i in indices_per_size[len(_lowerCAmelCase )]]
return shuffled_kwargs
| 454
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase = 16
lowerCamelCase = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders for a BERT model.

    Renamed from the obfuscated `_A` to match the call site in
    `training_function`; fixes the duplicated parameter names and restores
    the undefined locals (`tokenizer`, `datasets`, `tokenized_datasets`,
    the two dataloaders).

    Args:
        accelerator: the `accelerate.Accelerator` driving this run (used for
            `main_process_first` and padding decisions).
        batch_size: per-device batch size for both splits.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only: when the CI sets TESTING_MOCKED_DATALOADERS=1, swap the
# real dataloader builder for accelerate's lightweight mock.
# NOTE(review): the obfuscation assigns the mock to `lowerCamelCase` — it
# presumably should rebind `get_dataloaders`; confirm against the original.
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    lowerCamelCase = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on GLUE/MRPC with accelerate + LocalSGD.

    Renamed from the obfuscated `_A` to match the call in `main`; fixes the
    duplicated parameter names and restores every local that had been
    clobbered into `__lowercase`.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
        args: parsed CLI namespace (cpu, mixed_precision,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # Shrink the run when CI substitutes mocked dataloaders
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI flags and launch `training_function`.

    Renamed from the obfuscated `_A` to match the `main()` call at the
    bottom of the script; restores the undefined locals `parser`, `args`
    and `config`.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point: run the accelerate/LocalSGD training example.
if __name__ == "__main__":
    main()
| 454
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
__A = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    """Node of a disjoint-set (union-find) forest.

    Renamed from the obfuscated `UpperCAmelCase` to match the construction
    site in `DisjointSetTree.make_set`; restores the three attribute
    assignments that had been collapsed into one throwaway name.
    """

    def __init__(self, data):
        # the payload identifying this element
        self.data = data
        # each node starts as the root of its own singleton set
        self.parent = self
        # union-by-rank bookkeeping
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) with path compression and union by rank.

    Renamed from the obfuscated `UpperCAmelCase`, and the methods from
    `_snake_case`, to match the call sites in `GraphUndirectedWeighted.kruskal`
    (`make_set`, `find_set`, `union`); restores the undefined locals.
    """

    def __init__(self):
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data):
        # create a new singleton set with `data` as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data):
        # find the set `data` belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2):
        # helper for union: attach the lower-rank root under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1, data2):
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal minimum-spanning-tree.

    Renamed from the obfuscated `UpperCAmelCase` to match its own
    construction inside `kruskal`; method names restored to match their
    internal call sites (`add_node`, `add_edge`). Also fixes the sort key
    lambda, whose parameter had been obfuscated away from the `x` used in
    its body (a NameError), and the rank comparison in the disjoint set.
    """

    def __init__(self):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node):
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1, node2, weight):
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self):
        """Return the minimum spanning tree as a new GraphUndirectedWeighted."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    # record the reverse pair so each undirected edge is taken once
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the lightest edge that joins two components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 586
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node holding an int payload and optional children.

    Renamed from the obfuscated `UpperCAmelCase` to match the `Node(...)`
    construction sites in `make_tree`; field names restored from their use
    sites (`.data`, `.left`, `.right`).
    """

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """Build the fixed sample tree used by the demo:

            1
           / \
          2   3
         / \
        4   5

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` to match the call
    in the demo `main`; restores the undefined local `tree` (all five nodes
    had been assigned to the same throwaway name).
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root) -> list[int]:
    """Return the pre-order traversal (root, left, right) of the tree.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` so the recursive
    calls to `preorder` already present in the body resolve.
    """
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root) -> list[int]:
    """Return the post-order traversal (left, right, root) of the tree.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` so the recursive
    calls to `postorder` already present in the body resolve.
    """
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root) -> list[int]:
    """Return the in-order traversal (left, root, right) of the tree.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` so the recursive
    calls to `inorder` already present in the body resolve.
    """
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root) -> int:
    """Return the height of the tree (number of levels; empty tree is 0).

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` so the recursive
    calls to `height` already present in the body resolve.
    """
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root) -> Sequence[Node | None]:
    """Return all node payloads in breadth-first (level) order.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` to match the call
    in the demo `main`; restores the undefined locals `output`,
    `process_queue` and `node`.
    """
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        # enqueue children left-to-right to preserve level order
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root, level) -> Sequence[Node | None]:
    """Return the payloads on the given 1-based level, left to right.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` to match the calls
    in `zigzag` and the demo `main`; restores the undefined local `output`
    and the inner helper's parameter names.
    """
    output: list[Any] = []

    def populate_output(root, level) -> None:
        # walk down, emitting payloads when the requested level is reached
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root, level) -> Sequence[Node | None]:
    """Return the payloads on the given 1-based level, right to left.

    Mirror image of `get_nodes_from_left_to_right` (right subtree visited
    first). Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` to match
    the call in `zigzag`; restores the undefined local `output`.
    """
    output: list[Any] = []

    def populate_output(root, level) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            # visit the right subtree first to reverse the level
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root) -> Sequence[Node | None] | list[Any]:
    """Return the zig-zag (spiral) level-order traversal, one list per level.

    Odd levels are read left-to-right, even levels right-to-left. Renamed
    from the obfuscated `SCREAMING_SNAKE_CASE__` to match the call in the
    demo `main`; restores the undefined locals `output`, `flag` and
    `height_tree`.
    """
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0  # 0 => next level left-to-right, 1 => right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    """Print every traversal of the sample tree.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` to match the
    `main()` call in the `__main__` block; restores the undefined local
    `root` and the obfuscated call targets.
    """
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
# Run the doctests, then the traversal demo, when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 586
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer constants.
# NOTE(review): the obfuscation renamed all four to `UpperCamelCase__`, so
# each assignment shadows the previous one and the class attributes below
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) no longer resolve — confirm the
# intended names against the original file.
# Module logger.
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
# Names of the on-disk resource files the tokenizer expects.
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
# Download URLs for each pretrained checkpoint's resource files.
UpperCamelCase__ : int = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
# Maximum positional-embedding size per pretrained checkpoint.
UpperCamelCase__ : str = {
    '''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji JSON file into lookup dicts.

    Renamed from the obfuscated `__UpperCAmelCase` to match the call site in
    the tokenizer's `__init__`; restores the undefined locals (`emoji`,
    `vocab`, `raw_vocab`, `ids_to_tokens`, `token`) and the two clobbered
    dict writes inside the loop.

    Each vocab line is either a single token or a comma-separated group of
    variant spellings sharing one id.

    Args:
        vocab_file: path to vocab.txt (one token or token-group per line).
        emoji_file: path to emoji.json.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji) where vocab maps each word
        variant to its id, raw_vocab maps the raw line to its id, and
        ids_to_tokens maps id -> list of word variants.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # a line that is just "," (or has no comma) is a single token; otherwise split variants
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCAmelCase_ ( lowerCamelCase_ ):
    # Slow tokenizer for GPT-NeoX Japanese: delegates the actual text
    # splitting to ``SubWordJapaneseTokenizer`` (defined below) and adapts
    # it to the ``PreTrainedTokenizer`` interface.
    # NOTE(review): an automated rename has clobbered identifiers throughout
    # this class — the four class attributes all assign to the single name
    # ``__a``; ``__init__`` declares several parameters sharing the name
    # ``snake_case__`` (a SyntaxError in Python); every method is named
    # ``snake_case`` so later defs shadow earlier ones; and several locals
    # read below (``vocab_file``, ``emoji_file``, ``index``, ``out_string``,
    # ``conversation`` …) are never bound.  The comments describe the
    # apparent intent; confirm against upstream GPTNeoXJapaneseTokenizer.
    __a : Union[str, Any] = VOCAB_FILES_NAMES
    __a : List[str] = PRETRAINED_VOCAB_FILES_MAP
    __a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a : Union[str, Any] = ["input_ids", "attention_mask"]
    # Validate that both files exist, load them, and build the underlying
    # subword tokenizer.
    def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
        super().__init__(
            unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
        if not os.path.isfile(snake_case__ ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(snake_case__ ):
            raise ValueError(
                F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        SCREAMING_SNAKE_CASE_ : str = do_clean_text
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
    # Vocabulary size, counted over the raw (un-expanded) vocab lines.
    @property
    def snake_case ( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    # Full vocabulary: raw vocab plus any tokens added after loading.
    def snake_case ( self ):
        return dict(self.raw_vocab ,**self.added_tokens_encoder )
    # Split raw text into subword tokens (optionally cleaning it first).
    def snake_case ( self ,snake_case__ ):
        return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
    # Token string -> id, falling back to the unknown-token id.
    def snake_case ( self ,snake_case__ ):
        return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
    # Id -> token string, via the subword tokenizer's reverse mapping.
    def snake_case ( self ,snake_case__ ):
        return self.subword_tokenizer.convert_id_to_token(snake_case__ )
    # Join a sequence of tokens back into a single stripped string.
    def snake_case ( self ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
        return out_string
    # Encode every turn of a chat ``Conversation``, appending EOS after each
    # turn and truncating from the left to the model's maximum length.
    def snake_case ( self ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : Dict = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
        if len(snake_case__ ) > self.model_max_length:
            SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
        return input_ids
    # Write the vocabulary and emoji mapping into ``save_directory`` using
    # the standard file names; returns the two output paths.
    def snake_case ( self ,snake_case__ ,snake_case__ = None ):
        SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
        if os.path.isdir(snake_case__ ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
                snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
                snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            SCREAMING_SNAKE_CASE_ : Tuple = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            SCREAMING_SNAKE_CASE_ : str = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                # Ids are expected to be consecutive; warn if a gap is found.
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    SCREAMING_SNAKE_CASE_ : Dict = token_index
                writer.write(','.join(snake_case__ ) + '\n' )
                index += 1
        with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,snake_case__ )
        return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
    # Japanese subword tokenizer used by the tokenizer class above: it first
    # normalizes volatile content (URLs, e-mails, phone numbers, dates,
    # prices, box-drawing characters) to sentinel tokens, then greedily
    # matches the longest vocabulary entry at each position, falling back to
    # byte tokens / <KIGOU> / <U2000U2BFF> for unmatched characters.
    # NOTE(review): an automated rename collapsed distinct identifiers — the
    # three ``__init__`` parameters all share the name ``snake_case__`` (a
    # SyntaxError), every attribute/regex is assigned to the same mangled
    # name while later code reads the original names (``self.vocab``,
    # ``content``, ``keisen``, ``blocks`` …), and all methods are named
    # ``snake_case``.  Comments describe apparent intent; confirm against
    # the upstream ``SubWordJapaneseTokenizer``.
    def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
        SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
        SCREAMING_SNAKE_CASE_ : Dict = emoji
        # Length of the longest vocab entry bounds the greedy-match window.
        SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
        # Content patterns replaced by sentinel tokens before tokenizing:
        # URL, e-mail, phone number, two date formats, and price.
        SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        SCREAMING_SNAKE_CASE_ : str = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        SCREAMING_SNAKE_CASE_ : str = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        # Box-drawing ("keisen") and block-element characters all map to
        # the <BLOCK> sentinel via a translation table.
        SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
    def __len__( self ):
        # One entry per id; token variants share a single id.
        return len(self.ids_to_tokens )
    # Replace URLs / e-mails / phone numbers / dates / prices with sentinel
    # tokens, map box-drawing chars to <BLOCK>, and collapse <BLOCK> runs.
    def snake_case ( self ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<URL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Dict = self.content_repattera.sub('<EMAIL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<TEL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Any = self.content_repattera.sub('<DATE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repattera.sub('<DATE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<PRICE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    # Tokenize ``text``: normalize whitespace/line breaks to sentinels,
    # substitute emoji, optionally clean, then greedily match the longest
    # vocabulary entry at each position.
    def snake_case ( self ,snake_case__ ,snake_case__=False ):
        SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
        SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
        SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
        SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
        SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
        SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
        # Substitute known emoji with their dedicated vocabulary tokens.
        for k, v in self.emoji["emoji"].items():
            if k in text:
                SCREAMING_SNAKE_CASE_ : int = text.replace(snake_case__ ,snake_case__ )
        if clean:
            SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
        # True when x encodes to exactly 2 UTF-8 bytes inside the symbol
        # ranges handled by the <KIGOU> token.
        def check_simbol(snake_case__ ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(snake_case__ ) == 1 and len(snake_case__ ) == 2:
                SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        # True when x encodes to 3 UTF-8 bytes in the U+2000..U+2BFF range,
        # emitted as the single <U2000U2BFF> token.
        def checkuae(snake_case__ ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(snake_case__ ) == 1 and len(snake_case__ ) == 3:
                SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        SCREAMING_SNAKE_CASE_ : int = 0
        SCREAMING_SNAKE_CASE_ : List[Any] = []
        # Greedy longest-match loop; among equal-length candidates the
        # smallest token id wins.
        while pos < len(snake_case__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
            for e in range(snake_case__ ,snake_case__ ,-1 ):
                SCREAMING_SNAKE_CASE_ : str = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(snake_case__ ) > 2:
                        SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(snake_case__ ) > 0:
                # the smallest token_id is adopted
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda snake_case__ : x[0] )[0]
                result.append(snake_case__ )
                SCREAMING_SNAKE_CASE_ : Optional[int] = e
            else:
                # No vocab match: emit a symbol/range token or raw byte tokens.
                SCREAMING_SNAKE_CASE_ : Any = pos + 1
                SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
                if check_simbol(snake_case__ ):
                    result.append('<KIGOU>' )
                elif checkuae(snake_case__ ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                SCREAMING_SNAKE_CASE_ : int = end
        return result
    # Convert a token id back into text: byte tokens are accumulated and
    # decoded as UTF-8, sentinel tokens map back to their characters.
    def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
        SCREAMING_SNAKE_CASE_ : str = []
        SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            # Flush any pending byte run before emitting a normal token.
            if len(snake_case__ ) > 0:
                words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
                SCREAMING_SNAKE_CASE_ : Dict = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(snake_case__ )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(snake_case__ )
        if len(snake_case__ ) > 0:
            words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
        SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
        return text
| 685
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``.

    Project Euler problem 1.  ``solution()`` is 233168 for the default
    limit of 1000.

    Fix: the function had been renamed to ``__UpperCAmelCase`` while the
    ``__main__`` guard still calls ``solution()`` — a NameError at runtime.

    Args:
        n: exclusive upper bound for the multiples considered.
    """
    # Start at 3 because 0, 1 and 2 contribute nothing to the sum.
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
| 685
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for the fast Blenderbot tokenizer.
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# File names expected inside a pretrained Blenderbot checkpoint.
__lowerCamelCase : int = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
# Download URLs for the published facebook/blenderbot-3B checkpoint.
__lowerCamelCase : Optional[Any] = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
# Maximum model input length (in tokens).
# NOTE(review): all four constants above share the single mangled name
# ``__lowerCamelCase`` — only the last assignment survives; confirm
# against the upstream module's distinct constant names.
__lowerCamelCase : Optional[int] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase ( lowercase_):
    """Fast (tokenizers-backed) Blenderbot tokenizer using byte-level BPE.

    NOTE(review): identifiers here were clobbered by an automated rename —
    the five class attributes all assign to ``lowerCAmelCase_`` (only the
    last survives); ``__init__`` reads locals (``pre_tok_state``,
    ``add_prefix_space``, ``tokenizer_component_instance``, ``state``,
    ``changes_to_apply`` …) that are never bound; and the
    ``@mask_token.setter`` refers to a property name that no longer exists
    because the property method was renamed.  Confirm against the upstream
    ``BlenderbotTokenizerFast`` before relying on this code.
    """
    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
    lowerCAmelCase_ = BlenderbotTokenizer
    # Build the backend tokenizer, then force its pre-tokenizer and
    # post-processor state to agree with add_prefix_space / trim_offsets.
    def __init__( self : List[str] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Tuple="replace" , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : Tuple="<mask>" , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[Any]=True , **UpperCamelCase__ : Any , ) -> int:
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
        # Rebuild the pre-tokenizer if its stored add_prefix_space disagrees
        # with the requested value.
        _UpperCamelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
            _UpperCamelCase =getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
            _UpperCamelCase =add_prefix_space
            _UpperCamelCase =pre_tok_class(**UpperCamelCase__ )
        _UpperCamelCase =add_prefix_space
        # Same dance for the post-processor component.
        _UpperCamelCase ='''post_processor'''
        _UpperCamelCase =getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
        if tokenizer_component_instance:
            _UpperCamelCase =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _UpperCamelCase =tuple(state['''sep'''] )
            if "cls" in state:
                _UpperCamelCase =tuple(state['''cls'''] )
            _UpperCamelCase =False
            if state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
                _UpperCamelCase =add_prefix_space
                _UpperCamelCase =True
            if state.get('''trim_offsets''' , UpperCamelCase__ ) != trim_offsets:
                _UpperCamelCase =trim_offsets
                _UpperCamelCase =True
            if changes_to_apply:
                _UpperCamelCase =getattr(UpperCamelCase__ , state.pop('''type''' ) )
                _UpperCamelCase =component_class(**UpperCamelCase__ )
                setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
    # Mask token as a string; warns and returns None when unset.
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def UpperCamelCase__ ( self : int ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    # Setter: wrap a plain string in an AddedToken that keeps its left space.
    @mask_token.setter
    def UpperCamelCase__ ( self : int , UpperCamelCase__ : Tuple ) -> Optional[Any]:
        _UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
        _UpperCamelCase =value
    # Pretokenized input requires add_prefix_space=True (batch variant).
    def UpperCamelCase__ ( self : List[str] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ) -> BatchEncoding:
        _UpperCamelCase =kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
    # Pretokenized input requires add_prefix_space=True (single variant).
    def UpperCamelCase__ ( self : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[Any] ) -> BatchEncoding:
        _UpperCamelCase =kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
    # Save the underlying BPE model files via the backend tokenizer.
    def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
        _UpperCamelCase =self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
    # Token-type ids: Blenderbot never uses a second segment, so all zeros.
    def UpperCamelCase__ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        _UpperCamelCase =[self.sep_token_id]
        _UpperCamelCase =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Special-token pattern for Blenderbot: just append EOS.
    def UpperCamelCase__ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[Any]:
        return token_ids_a + [self.eos_token_id]
    # Flatten a chat Conversation into a single id sequence, left-truncated
    # to the model's maximum length.
    def UpperCamelCase__ ( self : str , UpperCamelCase__ : "Conversation" ) -> List[int]:
        _UpperCamelCase =[]
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(UpperCamelCase__ )
        _UpperCamelCase =''' '''.join(UpperCamelCase__ )
        _UpperCamelCase =self.encode(UpperCamelCase__ )
        if len(UpperCamelCase__ ) > self.model_max_length:
            _UpperCamelCase =input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 404
|
'''simple docstring'''
class EditDistance:
    """Levenshtein edit distance between two words.

    Offers both a memoized top-down solver (``min_dist_top_down``) and an
    iterative bottom-up solver (``min_dist_bottom_up``).  Both return the
    minimum number of single-character insertions, deletions and
    substitutions needed to turn the first word into the second.

    Fixes over the previous version: the class and its methods had been
    mangled (all three methods shared one name, so only the last survived,
    and every assignment went to a discarded throwaway name), while the
    ``__main__`` demo below calls ``EditDistance().min_dist_top_down`` /
    ``min_dist_bottom_up`` — the original public interface is restored.
    """

    def __init__(self):
        # Populated per call by the public entry points below.
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def _min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Memoized distance between word1[:m+1] and word2[:n+1]."""
        if m == -1:
            # First prefix empty: insert the remaining n + 1 characters.
            return n + 1
        if n == -1:
            # Second prefix empty: delete the remaining m + 1 characters.
            return m + 1
        if self.dp[m][n] > -1:
            return self.dp[m][n]
        if self.word1[m] == self.word2[n]:
            self.dp[m][n] = self._min_dist_top_down_dp(m - 1, n - 1)
        else:
            insert = self._min_dist_top_down_dp(m, n - 1)
            delete = self._min_dist_top_down_dp(m - 1, n)
            replace = self._min_dist_top_down_dp(m - 1, n - 1)
            self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Edit distance via memoized recursion."""
        self.word1 = word1
        self.word2 = word2
        # dp[m][n] caches the distance for the prefix pair (m, n); -1 = unset.
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self._min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Edit distance via an iterative (m+1) x (n+1) DP table."""
        self.word1 = word1
        self.word2 = word2
        m, n = len(word1), len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two strings and print the edit distance from
    # both the top-down and bottom-up solvers.
    # NOTE(review): this block requires a class named ``EditDistance``
    # exposing ``min_dist_top_down`` / ``min_dist_bottom_up``, and it reads
    # ``solver`` / ``Sa`` even though the assignments above all target the
    # mangled name ``__lowerCamelCase`` — as written this raises NameError;
    # the identifiers were clobbered by an automated rename.
    __lowerCamelCase : int = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    __lowerCamelCase : Optional[int] = input('Enter the first string: ').strip()
    __lowerCamelCase : Optional[int] = input('Enter the second string: ').strip()
    print()
    print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
    print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 404
| 1
|
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Import-time placeholder for an object that requires torch + scipy.

    Instantiating it, or calling its two classmethods (originally
    ``from_config`` / ``from_pretrained``), raises a helpful ImportError via
    ``requires_backends`` instead of an opaque NameError when the backends
    are missing.

    NOTE(review): an automated rename gave the ``*args`` and ``**kwargs``
    parameters below the identical name ``UpperCAmelCase_`` — duplicate
    argument names are a SyntaxError — and both classmethods now share one
    name; confirm against the upstream DummyObject template.
    """
    lowercase_ = ["torch", "scipy"]
    def __init__(self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int) ->str:
        '''Raise the missing-backend ImportError immediately on construction.'''
        requires_backends(self , ["torch", "scipy"])
    @classmethod
    def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str) ->Optional[int]:
        '''Raise the missing-backend ImportError for config-based loading.'''
        requires_backends(cls , ["torch", "scipy"])
    @classmethod
    def SCREAMING_SNAKE_CASE_ (cls : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str) ->int:
        '''Raise the missing-backend ImportError for pretrained loading.'''
        requires_backends(cls , ["torch", "scipy"])
| 437
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Fast tests for ``TextToVideoSDPipeline`` built from tiny random components.'''
    # NOTE(review): the four class attributes below all assign to the single
    # mangled name ``lowercase_`` — only the last assignment survives;
    # upstream they are distinct (pipeline_class, params, batch_params,
    # required_optional_params).  Confirm before relying on this file.
    lowercase_ = TextToVideoSDPipeline
    lowercase_ = TEXT_TO_IMAGE_PARAMS
    lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    lowercase_ = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
        '''Build the dict of tiny, deterministically-seeded pipeline components.'''
        torch.manual_seed(0)
        lowerCamelCase__: Optional[int] =UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        lowerCamelCase__: Union[str, Any] =DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
        torch.manual_seed(0)
        lowerCamelCase__: List[str] =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        lowerCamelCase__: Optional[int] =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        lowerCamelCase__: Optional[Any] =CLIPTextModel(UpperCAmelCase_)
        lowerCamelCase__: Dict =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        lowerCamelCase__: Tuple ={
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=0) ->Union[str, Any]:
        '''Build deterministic call kwargs (generator seeded per device).'''
        if str(UpperCAmelCase_).startswith("mps"):
            lowerCamelCase__: Optional[int] =torch.manual_seed(UpperCAmelCase_)
        else:
            lowerCamelCase__: Any =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
        lowerCamelCase__: List[Any] ={
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
        '''Run a 2-step generation on CPU and compare a frame slice to a reference.'''
        lowerCamelCase__: List[str] ="cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__: Optional[Any] =self.get_dummy_components()
        lowerCamelCase__: List[Any] =TextToVideoSDPipeline(**UpperCAmelCase_)
        lowerCamelCase__: int =sd_pipe.to(UpperCAmelCase_)
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
        lowerCamelCase__: Any =self.get_dummy_inputs(UpperCAmelCase_)
        lowerCamelCase__: List[Any] ="np"
        lowerCamelCase__: Optional[int] =sd_pipe(**UpperCAmelCase_).frames
        lowerCamelCase__: Dict =frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        lowerCamelCase__: Optional[int] =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
        '''Attention slicing must not change outputs beyond a small tolerance.'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]:
        '''xFormers attention must not change outputs beyond a small tolerance.'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
        '''Skipped: batched inference unsupported for this pipeline.'''
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
        '''Skipped: batched inference unsupported for this pipeline.'''
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
        '''Skipped: num_images_per_prompt unsupported for this pipeline.'''
        pass
    def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
        '''Delegate to the mixin's progress-bar test.'''
        return super().test_progress_bar()
@slow
@skip_mps
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration tests running the real damo-vilab checkpoint on CUDA.'''
    # NOTE(review): in both tests the generated frames are assigned to a
    # mangled throwaway name while the assertion reads ``video`` — as
    # written this raises NameError; the local names were clobbered by an
    # automated rename.
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
        '''Full 25-step DPM-Solver generation compared against a stored reference video.'''
        lowerCamelCase__: Dict =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        lowerCamelCase__: Optional[int] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        lowerCamelCase__: str =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        lowerCamelCase__: Tuple =pipe.to("cuda")
        lowerCamelCase__: List[Any] ="Spiderman is surfing"
        lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
        lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="pt").frames
        lowerCamelCase__: str =video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[int]:
        '''Cheap 2-step generation with the default scheduler against its reference.'''
        lowerCamelCase__: Optional[Any] =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        lowerCamelCase__: List[str] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        lowerCamelCase__: Any =pipe.to("cuda")
        lowerCamelCase__: Dict ="Spiderman is surfing"
        lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
        lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="pt").frames
        lowerCamelCase__: List[Any] =video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 437
| 1
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """Minimal map-style dataset whose i-th sample is simply the integer i.

    Fixes over the previous version: the class had been renamed while the
    ``__main__`` guard still instantiates ``DummyDataset``; its base class
    was mangled to an undefined name (``Dataset`` is imported above); and
    ``__init__`` assigned the undefined name ``length`` because the
    parameter itself had been renamed — a NameError on construction.
    """

    def __init__(self, length: int = 101):
        # Number of samples; the Trainer iterates indices 0..length-1.
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Each sample is its own index, so ordering is trivially checkable.
        return i
class DummyDataCollator:
    """Collator that tensorizes the batch and reuses it as both inputs and labels.

    Restored name: the ``__main__`` guard below constructs
    ``DummyDataCollator()``, but the class had been renamed by an automated
    pass (and then shadowed by later classes given the same mangled name).
    """

    def __call__(self, features):
        # Labels mirror the inputs so the dummy model can echo them back,
        # letting the distributed test verify sample order end to end.
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    """Model stub: echoes its input ids; with labels it also returns a zero loss.

    Fixes over the previous version: the class had been renamed while the
    ``__main__`` guard constructs ``DummyModel()``; the forward method was
    mangled to a non-standard name (``Trainer`` calls the model, so it must
    be ``forward``); and the linear layer was assigned to a discarded name,
    so the deliberately-unused parameters were never registered.
    """

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        # During evaluate/predict the Trainer passes labels; a constant zero
        # loss keeps it happy while the ids pass through for order checking.
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """Launches this file under ``torchrun`` on 2 NeuronCores and checks it exits cleanly.

    Fixes over the previous version: the base class was mangled to an
    undefined name (``TestCasePlus`` is imported above); the method was not
    ``test_``-prefixed, so unittest never collected it; and every local was
    assigned to a discarded name while later lines read ``output_dir`` /
    ``distributed_args`` / ``args`` — a NameError at runtime.
    """

    @require_torch_neuroncore
    def test_trainer(self):
        # Re-run this very file under torchrun; the __main__ block performs
        # the actual distributed assertions.
        distributed_args = (
            f"--nproc_per_node=2 "
            f"--master_port={get_torch_dist_unique_port()} "
            f"{self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """Launches this file under ``torchrun`` across all visible GPUs and checks it exits cleanly.

    Fixes over the previous version: undefined mangled base class (should be
    ``TestCasePlus``), non-``test_``-prefixed method invisible to unittest,
    and locals assigned to a discarded name while later lines read
    ``output_dir`` / ``distributed_args`` / ``args`` — a NameError.
    """

    @require_torch_multi_gpu
    def test_trainer(self):
        # One process per available GPU; the __main__ block below performs
        # the actual distributed assertions.
        distributed_args = (
            f"--nproc_per_node={torch.cuda.device_count()} "
            f"--master_port={get_torch_dist_unique_port()} "
            f"{self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    # NOTE(review): most locals here were renamed to ``_lowerCAmelCase`` by
    # an automated pass while later lines still read the original names
    # (``parser``, ``training_args``, ``dataset``, ``compute_metrics``,
    # ``trainer``, ``metrics``, ``p``, ``sequential``, ``success``) — as
    # written this raises NameError; confirm against the upstream
    # test_trainer_distributed.py before running.
    _lowerCAmelCase : Optional[int] = HfArgumentParser((TrainingArguments,))
    _lowerCAmelCase : int = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_01, 40, 7]:
        _lowerCAmelCase : Tuple = DummyDataset(dataset_length)

        # Checks that predictions/labels come back complete and in dataset order.
        def __snake_case ( SCREAMING_SNAKE_CASE__ : EvalPrediction ) -> Dict:
            '''Return {"success": bool} for a finished evaluation/prediction.'''
            _UpperCAmelCase : Optional[int] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) )
            _UpperCAmelCase : Any = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n - predictions: "
                    f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
            return {"success": success}

        _lowerCAmelCase : int = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        # Both evaluation and prediction must report success.
        _lowerCAmelCase : Optional[int] = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _lowerCAmelCase : Any = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        # Repeat with past_index set to exercise that Trainer code path too.
        _lowerCAmelCase : str = 2
        _lowerCAmelCase : Optional[int] = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        _lowerCAmelCase : int = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        _lowerCAmelCase : str = None
| 289
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __snake_case() -> "Image.Image":
    """Download the demo "merlion" image used to sanity-check converted BLIP-2 models.

    Fixes over the previous version: the URL/stream-flag arguments had been
    mangled to the undefined name ``SCREAMING_SNAKE_CASE__`` (a NameError at
    call time), and the return annotation said ``Tuple`` although a single
    PIL image is returned.

    Returns:
        The downloaded image decoded to RGB.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    # stream=True lets PIL read directly from the response's raw file object
    # without buffering the whole payload first.
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the (old_key, new_key) pairs mapping original BLIP-2 state-dict names
    to the Hugging Face naming scheme.

    Args:
        config: model config exposing ``config.vision_config.num_hidden_layers``.

    Returns:
        list[tuple[str, str]]: rename pairs, in the order they should be applied.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias'))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place; the old key is removed.

    Args:
        dct (dict): state dict to mutate.
        old (str): key to remove.
        new (str): key to insert with the old key's value.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the per-layer q/v attention biases into a single qkv bias, in place.

    The original checkpoint stores separate q and v biases (k has no bias); the
    HF layout expects one concatenated qkv bias, so zeros are inserted for k.

    Args:
        state_dict (dict): checkpoint state dict, mutated in place.
        config: config exposing ``config.vision_config.num_hidden_layers``.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict: the k slot carries a fixed zero bias
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv_bias'] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    """Assemble the BLIP-2 config matching ``model_name``.

    Args:
        model_name (str): one of the supported blip2-* checkpoint names.
        eos_token_id (int): eos token id to force on the OPT text config.

    Returns:
        tuple: ``(config, image_size)`` — the composed ``BlipaConfig`` and the
        expected input resolution (COCO-finetuned checkpoints use 364, else 224).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        # previously this fell through and raised an opaque UnboundLocalError
        raise ValueError(f"Model name {model_name} not supported")
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the Hugging Face format.

    Loads the original LAVIS model, renames its state-dict keys, verifies the
    converted model reproduces the original logits on a demo image, runs a
    caption-generation sanity check, then optionally saves and/or pushes the
    converted model and processor.

    Args:
        model_name (str): key into the supported checkpoint table below.
        pytorch_dump_folder_path (str, optional): where to save model+processor.
        push_to_hub (bool): whether to push the converted artifacts to the hub.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    # the id of "\n" is forced as eos so generation stops at a newline
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type_ = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type_, is_eval=True, device=device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
        # the original code computed this slice but never checked it
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1,
        top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print("Original generation:", original_outputs)
    # drop the prompt tokens from the decoded output
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 289
| 1
|
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
lowerCAmelCase__ : Optional[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCAmelCase__ : Any = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCAmelCase__ : str = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__(datasets.Metric):
    """F1 metric backed by scikit-learn; supports binary, multiclass and multilabel inputs."""

    def _info(self):
        """Declare metric metadata and input features (sequences when configured as multilabel)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1; returns a float, or an array of per-class scores when ``average=None``."""
        # sklearn's signature is f1_score(y_true, y_pred, ...), so references go first
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 329
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hub locations of the pretrained vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum sequence length the pretrained model supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    Args:
        word: a sequence of symbols (e.g. a tuple of variable-length strings).

    Returns:
        set[tuple]: all consecutive ``(symbol, next_symbol)`` pairs.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizer):
    """
    Byte-pair-encoding tokenizer for BlenderbotSmall.

    Lowercases input, splits punctuation and apostrophes apart, applies BPE
    merges and marks word-internal pieces with a trailing ``@@``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
                 unk_token="__unk__", pad_token="__null__", **kwargs):
        """Load the JSON vocab and the merges file.

        Args:
            vocab_file (str): path to ``vocab.json``.
            merges_file (str): path to ``merges.txt`` (first line is a header).
        """
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is a version header, last element is an empty trailing line
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # memoizes bpe() results per input token
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE to ``token``; returns pieces joined by spaces, with ``@@`` continuation markers."""
        if token in self.cache:
            return self.cache[token]
        # split punctuation and apostrophes into standalone symbols
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            # mark end of word so merges can distinguish word-final symbols
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                # merge the highest-priority (lowest-rank) bigram first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # strip the trailing "</w>" marker
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split ``text`` on whitespace and BPE-encode each chunk."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token to its id, falling back to the unk token's id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens and undo the ``@@`` continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 329
| 1
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase(unittest.TestCase):
    """Pipeline tests for the zero-shot audio classification task."""

    @require_torch
    def test_small_model_pt(self):
        """Tiny CLAP checkpoint: check the exact (simplified) score/label pairs."""
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        """Full CLAP checkpoint: single input, batched list input, and explicit batch_size."""
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 642
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

# SentencePiece model file expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

# Hub location of the pretrained vocabulary.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : str , a_ : Dict , a_ : List[str]=False , a_ : Any=True , a_ : int=False , a_ : Union[str, Any]="<s>" , a_ : Optional[int]="</s>" , a_ : int="<unk>" , a_ : List[Any]="<sep>" , a_ : Dict="<pad>" , a_ : Any="<cls>" , a_ : Optional[Any]="<mask>" , a_ : int=["<eop>", "<eod>"] , a_ : Optional[Dict[str, Any]] = None , **a_ : int , ) -> None:
'''simple docstring'''
a__ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
a__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
a__ : Union[str, Any] = 3
a__ : Dict = do_lower_case
a__ : Union[str, Any] = remove_space
a__ : int = keep_accents
a__ : str = vocab_file
a__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
a__ : Optional[int] = jieba
a__ : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Union[str, Any] = None
return state
def __setstate__( self : Tuple , a_ : int ) -> List[str]:
'''simple docstring'''
a__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : str = {}
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if self.remove_space:
a__ : Union[str, Any] = " ".join(inputs.strip().split() )
else:
a__ : Optional[Any] = inputs
a__ : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__ : Union[str, Any] = unicodedata.normalize("NFKD" , a_ )
a__ : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
a__ : List[Any] = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , a_ : str ) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = self.preprocess_text(a_ )
a__ : Dict = self.sp_model.encode(a_ , out_type=a_ )
a__ : Optional[Any] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__ : List[str] = cur_pieces[1:]
else:
a__ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def UpperCAmelCase ( self : int , a_ : Dict ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(a_ )
def UpperCAmelCase ( self : Dict , a_ : Tuple ) -> List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(a_ )
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[Any] ) -> str:
'''simple docstring'''
a__ : Optional[Any] = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def UpperCAmelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[Any] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def UpperCAmelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Dict , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
a__ : int = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def UpperCAmelCase ( self : str , *args : Union[str, Any] , **kwargs : Any ) -> int:
    """Decode ids to text, then undo CPM-style pre-tokenization.

    Removes the plain spaces inserted between pieces and maps the CPM
    placeholders U+2582 -> space and U+2583 -> newline.

    BUG FIX: the original used ``*a_ , **a_`` (duplicate parameter name,
    a SyntaxError) and bound the decoded string to ``a__`` while reading
    the undefined name ``text``.
    """
    text = super()._decode(*args , **kwargs )
    text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
    return text
| 642
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase (unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark on tiny hub checkpoints.

    NOTE(review): this chunk is machine-mangled and is not runnable as
    written.  Every local value is assigned to `_SCREAMING_SNAKE_CASE`
    but later read under its intended name (MODEL_ID, benchmark_args,
    benchmark, results, config, model_result) -> NameError on first use.
    All test methods also share the single name `UpperCamelCase`, so only
    the last definition survives on the class, and the helper is invoked
    as `self.check_results_dict_not_empty`, which is never defined.
    Restore the original bindings and method names before use.
    """
    def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
        """Helper: assert every (batch_size, sequence_length) result entry exists."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                _SCREAMING_SNAKE_CASE = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(UpperCAmelCase_ )
    def UpperCamelCase ( self: List[str] ):
        """Inference benchmark on tiny-gpt2 without explicit configs."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: Optional[Any] ):
        """Inference benchmark with only_pretrain_model enabled."""
        _SCREAMING_SNAKE_CASE = """sgugger/tiny-distilbert-classification"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , only_pretrain_model=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: Tuple ):
        """Inference benchmark with torchscript tracing enabled."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , torchscript=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def UpperCamelCase ( self: List[Any] ):
        """Half-precision inference benchmark (GPU only); `fpaa` is mangled `fp16`."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: List[str] ):
        """Inference benchmark with config.architectures forced to None."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
        # set architectures equal to `None`
        _SCREAMING_SNAKE_CASE = None
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ , configs=[config] )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: Any ):
        """Training benchmark on tiny-gpt2 without explicit configs."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def UpperCamelCase ( self: Union[str, Any] ):
        """Half-precision training benchmark (GPU only)."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def UpperCamelCase ( self: Optional[Any] ):
        """Inference benchmark passing an explicit AutoConfig."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ , configs=[config] )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: Dict ):
        """Inference benchmark for an encoder-decoder model (tinier_bart) with config."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart"""
        _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ , configs=[config] )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase ( self: List[Any] ):
        """Training benchmark with an explicit AutoConfig."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ , configs=[config] )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def UpperCamelCase ( self: int ):
        """Training benchmark for an encoder-decoder model (tinier_bart) with config."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart"""
        _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ , configs=[config] )
        _SCREAMING_SNAKE_CASE = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def UpperCamelCase ( self: str ):
        """Benchmark with save_to_csv: all five CSV artifacts must be written."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , save_to_csv=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase_ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(UpperCAmelCase_ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(UpperCAmelCase_ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCAmelCase_ , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCAmelCase_ , """env.csv""" ) , multi_process=UpperCAmelCase_ , )
            _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """env.csv""" ) ).exists() )
    def UpperCamelCase ( self: Tuple ):
        """Benchmark with line-by-line memory tracing: summaries and log file must exist."""
        _SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(UpperCAmelCase_: Dict ):
            # A memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(UpperCAmelCase_ , """sequential""" ) )
            self.assertTrue(hasattr(UpperCAmelCase_ , """cumulative""" ) )
            self.assertTrue(hasattr(UpperCAmelCase_ , """current""" ) )
            self.assertTrue(hasattr(UpperCAmelCase_ , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            _SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase_ , """log.txt""" ) , log_print=UpperCAmelCase_ , trace_memory_line_by_line=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
            _SCREAMING_SNAKE_CASE = PyTorchBenchmark(UpperCAmelCase_ )
            _SCREAMING_SNAKE_CASE = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(UpperCAmelCase_ , """log.txt""" ) ).exists() )
| 569
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
    """Convert an original LUKE checkpoint to a Hugging Face LukeModel and
    verify its outputs on a fixed entity-classification example.

    Parameters (positional): checkpoint_path, metadata_path,
    entity_vocab_path, pytorch_dump_folder_path, model_size.

    NOTE(review): this chunk is machine-mangled and is not runnable as
    written — values are bound to `_SCREAMING_SNAKE_CASE` but read back
    under their intended names (metadata, config, state_dict,
    entity_vocab, tokenizer, word_emb, prefix, model, outputs,
    expected_shape, ...), and the parameters are all named `snake_case__`
    so only the last binding is visible. The original variable names must
    be restored before use.
    """
    with open(snake_case__ ) as metadata_file:
        _SCREAMING_SNAKE_CASE = json.load(snake_case__ )
    _SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=snake_case__ ,**metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    _SCREAMING_SNAKE_CASE = torch.load(snake_case__ ,map_location="""cpu""" )
    # Load the entity vocab file
    _SCREAMING_SNAKE_CASE = load_entity_vocab(snake_case__ )
    _SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
    # Add special tokens to the token vocabulary for downstream tasks
    _SCREAMING_SNAKE_CASE = AddedToken("""<ent>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
    _SCREAMING_SNAKE_CASE = AddedToken("""<ent2>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(snake_case__ )
    with open(os.path.join(snake_case__ ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
        json.dump(snake_case__ ,snake_case__ )
    _SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ )
    # Initialize the embeddings of the special tokens
    _SCREAMING_SNAKE_CASE = state_dict["""embeddings.word_embeddings.weight"""]
    _SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
    _SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
    _SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            _SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
            _SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
            _SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
            _SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    _SCREAMING_SNAKE_CASE = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    _SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["""[MASK]"""]]
    _SCREAMING_SNAKE_CASE = LukeModel(config=snake_case__ ).eval()
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(snake_case__ ,strict=snake_case__ )
    # Only the position-id buffer is allowed to be missing from the checkpoint.
    if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'Missing keys {", ".join(snake_case__ )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
        raise ValueError(
            """Unexpected keys"""
            F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
    # Check outputs
    _SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ ,task="""entity_classification""" )
    _SCREAMING_SNAKE_CASE = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    # Character span of the entity mention used for the regression check.
    _SCREAMING_SNAKE_CASE = (39, 42)
    _SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ,entity_spans=[span] ,add_prefix_space=snake_case__ ,return_tensors="""pt""" )
    _SCREAMING_SNAKE_CASE = model(**snake_case__ )
    # Verify word hidden states
    if model_size == "large":
        _SCREAMING_SNAKE_CASE = torch.Size((1, 42, 10_24) )
        _SCREAMING_SNAKE_CASE = torch.tensor(
            [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
    else:  # base
        _SCREAMING_SNAKE_CASE = torch.Size((1, 42, 7_68) )
        _SCREAMING_SNAKE_CASE = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        _SCREAMING_SNAKE_CASE = torch.Size((1, 1, 10_24) )
        _SCREAMING_SNAKE_CASE = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
    else:  # base
        _SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
        _SCREAMING_SNAKE_CASE = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    # NOTE(review): the original used `!=` here, which raises exactly when
    # the shape IS correct (inverted condition) — the word-hidden-state
    # check above uses `==`; this one must match.
    if not (outputs.entity_last_hidden_state.shape != expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(snake_case__ ) )
    model.save_pretrained(snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
    """Load a LUKE entity-vocabulary TSV file.

    Each line is ``<entity title>\\t<original id>``; the returned dict maps
    each entity title to its (0-based) line index.

    BUG FIXES vs. the original:
      * it enumerated the *path string* ``snake_case__`` instead of the
        open file handle ``f``;
      * the dict and each entry were bound to ``_SCREAMING_SNAKE_CASE``
        while the undefined name ``entity_vocab`` was returned.
    """
    entity_vocab = {}
    with open(snake_case__ ,"""r""" ,encoding="""utf-8""" ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    # CLI entry point for the LUKE checkpoint conversion script.
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    # NOTE(review): mangled — the parser is bound to `UpperCamelCase` but
    # used as `parser`, and `convert_luke_checkpoint` is never defined
    # under that name (the conversion function above is `__lowerCamelCase`).
    # Restore the original names before running.
    UpperCamelCase = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 569
| 1
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _SCREAMING_SNAKE_CASE:
    """Directed weighted graph stored as an adjacency dict: node -> [[w, v], ...].

    NOTE(review): this chunk is machine-mangled and is not runnable as
    written.  Every assignment target was rewritten to
    `__SCREAMING_SNAKE_CASE`, so the instance state (`self.graph`) and the
    traversal locals (`s`, `ss`, `begin`, `end`, ...) that the code later
    reads are never actually bound -> NameError/AttributeError at runtime.
    In addition, every method shares the single name `_UpperCamelCase`,
    so only the last definition (bfs timing) survives on the class.  The
    original names (add_pair, remove_pair, dfs, bfs, in_degree,
    out_degree, topological_sort, cycle_nodes, has_cycle, dfs_time,
    bfs_time, ...) must be restored before use.
    """
    def __init__( self ) -> Optional[Any]:
        """Create an empty adjacency dict."""
        __SCREAMING_SNAKE_CASE :int = {}
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=1 ) -> Dict:
        """Add a directed edge u -> v with weight w (default 1), skipping duplicates."""
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            __SCREAMING_SNAKE_CASE :Tuple = [[w, v]]
        if not self.graph.get(SCREAMING_SNAKE_CASE__ ):
            # Ensure the target node exists even with no outgoing edges.
            __SCREAMING_SNAKE_CASE :Any = []
    def _UpperCamelCase ( self ) -> Any:
        """Return all node labels."""
        return list(self.graph )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        """Remove every edge u -> v."""
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ,SCREAMING_SNAKE_CASE__=-1 ) -> List[Any]:
        """Iterative DFS from s (default: first node); stop early if d is reached."""
        if s == d:
            return []
        __SCREAMING_SNAKE_CASE :Any = []
        __SCREAMING_SNAKE_CASE :List[str] = []
        if s == -2:
            __SCREAMING_SNAKE_CASE :Optional[Any] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :str = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Tuple = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(SCREAMING_SNAKE_CASE__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            __SCREAMING_SNAKE_CASE :str = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :str = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :List[Any] = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return visited
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-1 ) -> Any:
        """Populate the graph with random edges over c nodes (random c if -1)."""
        if c == -1:
            __SCREAMING_SNAKE_CASE :Dict = floor(random() * 1_00_00 ) + 10
        for i in range(SCREAMING_SNAKE_CASE__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                __SCREAMING_SNAKE_CASE :Optional[int] = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,1 )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ) -> Tuple:
        """Iterative BFS from s (default: first node); returns visit order."""
        __SCREAMING_SNAKE_CASE :List[Any] = deque()
        __SCREAMING_SNAKE_CASE :Optional[int] = []
        if s == -2:
            __SCREAMING_SNAKE_CASE :Optional[Any] = list(self.graph )[0]
        d.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        while d:
            __SCREAMING_SNAKE_CASE :List[Any] = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
        """Return the in-degree of node u (edges pointing at u)."""
        __SCREAMING_SNAKE_CASE :Optional[Any] = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
        """Return the out-degree of node u."""
        return len(self.graph[u] )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ) -> Any:
        """Topological sort via DFS finish order (valid only on acyclic graphs)."""
        __SCREAMING_SNAKE_CASE :Dict = []
        __SCREAMING_SNAKE_CASE :List[str] = []
        if s == -2:
            __SCREAMING_SNAKE_CASE :Union[str, Any] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[Any] = s
        __SCREAMING_SNAKE_CASE :Any = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Any = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __SCREAMING_SNAKE_CASE :Optional[int] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :List[str] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :Union[str, Any] = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return sorted_nodes
    def _UpperCamelCase ( self ) -> Any:
        """Return the set of nodes participating in cycles (back-edge targets)."""
        __SCREAMING_SNAKE_CASE :str = []
        __SCREAMING_SNAKE_CASE :Tuple = []
        __SCREAMING_SNAKE_CASE :Optional[int] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :str = -2
        __SCREAMING_SNAKE_CASE :Optional[Any] = []
        __SCREAMING_SNAKE_CASE :int = s
        __SCREAMING_SNAKE_CASE :List[str] = False
        __SCREAMING_SNAKE_CASE :List[str] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Union[str, Any] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        # Back edge found: everything on the stack down to
                        # the target is part of a cycle.
                        __SCREAMING_SNAKE_CASE :List[Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __SCREAMING_SNAKE_CASE :int = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __SCREAMING_SNAKE_CASE :Optional[int] = True
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :Tuple = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :List[Any] = False
                indirect_parents.append(SCREAMING_SNAKE_CASE__ )
                __SCREAMING_SNAKE_CASE :Tuple = s
                __SCREAMING_SNAKE_CASE :int = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return list(SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ) -> Tuple:
        """Return True as soon as any cycle is detected, else False."""
        __SCREAMING_SNAKE_CASE :List[str] = []
        __SCREAMING_SNAKE_CASE :int = []
        __SCREAMING_SNAKE_CASE :Optional[int] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :str = -2
        __SCREAMING_SNAKE_CASE :Optional[Any] = []
        __SCREAMING_SNAKE_CASE :int = s
        __SCREAMING_SNAKE_CASE :int = False
        __SCREAMING_SNAKE_CASE :Optional[Any] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Tuple = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __SCREAMING_SNAKE_CASE :Tuple = len(SCREAMING_SNAKE_CASE__ ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __SCREAMING_SNAKE_CASE :List[str] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __SCREAMING_SNAKE_CASE :Optional[Any] = True
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :Union[str, Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :Dict = False
                indirect_parents.append(SCREAMING_SNAKE_CASE__ )
                __SCREAMING_SNAKE_CASE :Dict = s
                __SCREAMING_SNAKE_CASE :Tuple = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return False
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ,SCREAMING_SNAKE_CASE__=-1 ) -> List[str]:
        """Wall-clock duration of a DFS run."""
        __SCREAMING_SNAKE_CASE :Optional[int] = time()
        self.dfs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[int] = time()
        return end - begin
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ) -> Tuple:
        """Wall-clock duration of a BFS run."""
        __SCREAMING_SNAKE_CASE :int = time()
        self.bfs(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Dict = time()
        return end - begin
class _SCREAMING_SNAKE_CASE:
    """Undirected weighted graph: every edge is stored in both directions.

    NOTE(review): same machine-mangling as the directed class above —
    assignment targets rewritten to `__SCREAMING_SNAKE_CASE` so the state
    the code reads back (`self.graph`, `s`, `ss`, `begin`, `end`, ...) is
    never bound, and all methods share the name `_UpperCamelCase` so only
    the last one survives.  This class also shadows the directed graph
    class defined earlier under the same module-level name.  Restore the
    original identifiers before use.
    """
    def __init__( self ) -> Dict:
        """Create an empty adjacency dict."""
        __SCREAMING_SNAKE_CASE :Union[str, Any] = {}
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=1 ) -> Optional[int]:
        """Add an undirected edge u <-> v with weight w (default 1)."""
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            __SCREAMING_SNAKE_CASE :Any = [[w, v]]
        # add the other way
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            __SCREAMING_SNAKE_CASE :Optional[Any] = [[w, u]]
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
        """Remove the edge u <-> v in both directions."""
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
        # the other way round
        if self.graph.get(SCREAMING_SNAKE_CASE__ ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ,SCREAMING_SNAKE_CASE__=-1 ) -> List[str]:
        """Iterative DFS from s (default: first node); stop early if d is reached."""
        if s == d:
            return []
        __SCREAMING_SNAKE_CASE :Any = []
        __SCREAMING_SNAKE_CASE :List[Any] = []
        if s == -2:
            __SCREAMING_SNAKE_CASE :str = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :List[str] = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Any = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(SCREAMING_SNAKE_CASE__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            __SCREAMING_SNAKE_CASE :Tuple = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :Optional[Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :List[str] = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return visited
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-1 ) -> Optional[int]:
        """Populate the graph with random edges over c nodes (random c if -1)."""
        if c == -1:
            __SCREAMING_SNAKE_CASE :List[Any] = floor(random() * 1_00_00 ) + 10
        for i in range(SCREAMING_SNAKE_CASE__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                __SCREAMING_SNAKE_CASE :Dict = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,1 )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ) -> Optional[int]:
        """Iterative BFS from s (default: first node); returns visit order."""
        __SCREAMING_SNAKE_CASE :int = deque()
        __SCREAMING_SNAKE_CASE :Optional[Any] = []
        if s == -2:
            __SCREAMING_SNAKE_CASE :List[Any] = list(self.graph )[0]
        d.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        while d:
            __SCREAMING_SNAKE_CASE :Any = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
        """Return the degree of node u (undirected: in-degree == out-degree)."""
        return len(self.graph[u] )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """Return the set of nodes participating in cycles (back-edge targets)."""
        __SCREAMING_SNAKE_CASE :Dict = []
        __SCREAMING_SNAKE_CASE :Any = []
        __SCREAMING_SNAKE_CASE :List[str] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Tuple = -2
        __SCREAMING_SNAKE_CASE :List[Any] = []
        __SCREAMING_SNAKE_CASE :List[Any] = s
        __SCREAMING_SNAKE_CASE :str = False
        __SCREAMING_SNAKE_CASE :int = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :Union[str, Any] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        # Back edge found: collect stack members down to target.
                        __SCREAMING_SNAKE_CASE :Dict = len(SCREAMING_SNAKE_CASE__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __SCREAMING_SNAKE_CASE :List[str] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __SCREAMING_SNAKE_CASE :Optional[Any] = True
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :Optional[Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :int = False
                indirect_parents.append(SCREAMING_SNAKE_CASE__ )
                __SCREAMING_SNAKE_CASE :Dict = s
                __SCREAMING_SNAKE_CASE :int = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return list(SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """Return True as soon as any cycle is detected, else False."""
        __SCREAMING_SNAKE_CASE :List[Any] = []
        __SCREAMING_SNAKE_CASE :Union[str, Any] = []
        __SCREAMING_SNAKE_CASE :Optional[int] = list(self.graph )[0]
        stack.append(SCREAMING_SNAKE_CASE__ )
        visited.append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Dict = -2
        __SCREAMING_SNAKE_CASE :Any = []
        __SCREAMING_SNAKE_CASE :Optional[int] = s
        __SCREAMING_SNAKE_CASE :Optional[int] = False
        __SCREAMING_SNAKE_CASE :Tuple = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __SCREAMING_SNAKE_CASE :List[Any] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __SCREAMING_SNAKE_CASE :Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __SCREAMING_SNAKE_CASE :List[Any] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __SCREAMING_SNAKE_CASE :Union[str, Any] = True
                if len(SCREAMING_SNAKE_CASE__ ) != 0:
                    __SCREAMING_SNAKE_CASE :Union[str, Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
            else:
                __SCREAMING_SNAKE_CASE :Optional[int] = False
                indirect_parents.append(SCREAMING_SNAKE_CASE__ )
                __SCREAMING_SNAKE_CASE :Optional[int] = s
                __SCREAMING_SNAKE_CASE :Any = ss
            # check if se have reached the starting point
            if len(SCREAMING_SNAKE_CASE__ ) == 0:
                return False
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Return all node labels."""
        return list(self.graph )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ,SCREAMING_SNAKE_CASE__=-1 ) -> Any:
        """Wall-clock duration of a DFS run."""
        __SCREAMING_SNAKE_CASE :Dict = time()
        self.dfs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[int] = time()
        return end - begin
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=-2 ) -> List[Any]:
        """Wall-clock duration of a BFS run."""
        __SCREAMING_SNAKE_CASE :Optional[int] = time()
        self.bfs(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :str = time()
        return end - begin
| 498
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE( A ):
    """Abstract CLI-command interface.

    NOTE(review): the base class `A` is imported/defined outside this
    chunk — presumably an ABC/argparse command base; confirm at the
    import site.  Both methods share the name `_UpperCamelCase`, so only
    the second (instance-level) definition survives on the class.
    """
    @staticmethod
    @abstractmethod
    def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> str:
        """Register this command on the given argument parser (abstract)."""
        raise NotImplementedError()
    @abstractmethod
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Execute the command (abstract)."""
        raise NotImplementedError()
| 498
| 1
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE = 3
def lowercase_ ( __A : int ) -> int:
    """Return a primitive-root candidate ``g`` modulo the prime ``__A``.

    Draws random ``g`` in ``[3, __A)`` and rejects candidates for which
    ``g**2 == 1 (mod p)`` or ``g**p == 1 (mod p)``.

    The original body never bound ``g`` (it assigned to a throwaway name and
    then tested ``pow(p, 2, p)``, i.e. the prime against itself), so it always
    raised NameError on return.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3, __A)
        if pow(g, 2, __A) == 1:
            continue
        if pow(g, __A, __A) == 1:
            continue
        return g
def lowercase_ ( __A : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair for the given key size (bits).

    Intended contract: returns ``(public_key, private_key)`` where the public
    key is ``(key_size, e_1, e_2, p)`` and the private key is ``(key_size, d)``.

    NOTE(review): as written this function is broken — every intermediate is
    assigned to a throwaway name while the tuples below read ``key_size``,
    ``e_a``, ``p`` and ``d``, none of which is ever defined (NameError), and
    ``primitive_root`` does not exist under that name in this module (the
    generator above is bound to ``lowercase_``).  Confirm the intended variable
    mapping (p / e_1 / d / e_2) against the module's history before relying on
    this.
    """
    print('''Generating prime p...''' )
    lowercase : Any =rabin_miller.generate_large_prime(__A ) # select large prime number.
    lowercase : List[str] =primitive_root(__A ) # one primitive root on modulo p.
    lowercase : List[str] =random.randrange(3 , __A ) # private_key -> have to be greater than 2 for safety.
    lowercase : List[str] =cryptomath.find_mod_inverse(pow(__A , __A , __A ) , __A )
    lowercase : Union[str, Any] =(key_size, e_a, e_a, p)
    lowercase : Optional[Any] =(key_size, d)
    return public_key, private_key
def lowercase_ ( name : str , key_size : int ) -> None:
    """Generate an ElGamal key pair and write it to ``{name}_pubkey.txt`` /
    ``{name}_privkey.txt``; refuses to overwrite existing files.

    The original signature declared ``__A`` twice (a SyntaxError) and the body
    read the never-defined names ``name``/``key_size``; both are restored as
    proper parameters.
    """
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    # NOTE(review): `generate_key` is not defined in this module (the key
    # generator above is bound to a different name) — confirm the intended
    # target before running.
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def lowercase_ ( ) -> None:
    """Program entry point: create the ElGamal key file pair on disk."""
    print('''Making key files...''')
    make_key_files('''elgamal''', 2048)
    print('''Key files generation successful''')
if __name__ == "__main__":
    # NOTE(review): `main` is never defined in this module — running it as a
    # script raises NameError.  The entry point defined above is `lowercase_()`;
    # confirm which function this guard was meant to call.
    main()
| 8
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# The original bound all four values below to the single name
# SCREAMING_SNAKE_CASE, each assignment clobbering the previous one, while the
# tokenizer class body reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
# (NameError at class-creation time).  The conventional names are restored.
logger = logging.get_logger(__name__)

# File names expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}

# Backward-compat alias for the name the module previously exported (its final
# binding was the positional-embeddings map).
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
    """Return a dict mapping each of the 256 byte values to a printable
    unicode character, as used by byte-level BPE (GPT-2/BART/LED).

    Printable bytes map to themselves; the remaining bytes are shifted up to
    code points >= 256 so every byte has a visible, reversible representation.

    The original body never bound ``bs``/``cs``/``n`` (they were assigned to a
    throwaway name), so the loop and the return raised NameError.
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def lowercase_ ( __A : str ) -> set:
    """Return the set of adjacent symbol pairs in the word ``__A``.

    ``__A`` is a sequence of symbols (e.g. a tuple of variable-length
    strings); the result feeds the BPE merge loop.

    The original body never bound ``pairs``/``prev_char`` (NameError); the
    return annotation is also corrected — the function returns a set.
    """
    pairs = set()
    prev_char = __A[0]
    for char in __A[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
    """Byte-level BPE tokenizer for LED (same algorithm as BART/GPT-2).

    The corrupted original declared the same parameter name several times per
    signature (SyntaxErrors), named every method ``A__`` (so later definitions
    shadowed earlier ones) and bound every local/attribute to a throwaway name
    while later code read ``self.encoder``, ``self.bpe_ranks`` etc.  The
    canonical byte-level-BPE implementation is restored; the class name and its
    string/IO behavior are unchanged.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors='''replace''',
        bos_token='''<s>''',
        eos_token='''</s>''',
        sep_token='''</s>''',
        cls_token='''<s>''',
        unk_token='''<unk>''',
        pad_token='''<pad>''',
        mask_token='''<mask>''',
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the vocab/merges files and initialise the BPE state.

        `vocab_file` maps token -> id (JSON); `merges_file` lists BPE merges in
        priority order.
        """
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to ``token`` and return the
        space-joined merged symbols (memoized in ``self.cache``)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize ``text``: regex-split, byte-encode, then BPE-merge."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (unk id if absent)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the byte-to-unicode mapping."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``; returns the
        two file paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED (like RoBERTa) uses a single all-zero token-type segment."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE'd like any other."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad as the base class does, then extend ``global_attention_mask``
        (LED-specific) to the padded length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))
        return encoded_inputs
| 8
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the RoCBert model package.
#
# The corrupted original bound the structure dict, the modeling list and the
# _LazyModule instance all to `A_`, so `_import_structure` (read on the last
# line) was undefined, the TYPE_CHECKING tokenizers branch unconditionally
# raised, and the lazy module was never installed in `sys.modules`.  The
# canonical pattern is restored below.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # RoCBert ships no fast tokenizer; nothing extra to register.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer to import
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Keep `A_` bound for backward compatibility with the old module-level name.
    A_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = A_
| 57
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the same module-level name is re-bound twice below — first to
# the logger, then to the pretrained-config URL map — so only the map survives
# and the logger binding is lost.  Nothing in the visible code reads either.
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class _lowercase ( lowerCAmelCase ):
    """Configuration class for CamemBERT (RoBERTa-architecture) models.

    Every constructor argument is persisted as an attribute of the same name.
    The corrupted original declared all parameters with one repeated name (a
    SyntaxError) and bound every value to a throwaway local instead of
    ``self``; the conventional parameter names and attribute assignments are
    restored, with the original default values preserved in order.
    """

    UpperCAmelCase_ = '''camembert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='''gelu''',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='''absolute''',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowercase ( lowerCAmelCase ):
    """ONNX export configuration for CamemBERT models.

    NOTE(review): this class shadows the config class above (both are named
    ``_lowercase`` at module level); the name is preserved to keep the
    interface unchanged.
    """

    @property
    def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes spec for the ONNX input tensors.

        The corrupted original bound the axis dict to a throwaway local while
        the return statement read the never-defined ``dynamic_axis``
        (NameError); the binding is restored.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 614
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _lowerCamelCase ( unittest.TestCase ):
    """Multi-GPU smoke tests that launch accelerate helper scripts via torchrun.

    NOTE(review): all five methods share the name ``__SCREAMING_SNAKE_CASE``,
    so each definition shadows the previous one and only the last survives on
    the class; the names are kept unchanged to preserve the interface, but
    this collision should be resolved upstream (the first method is clearly a
    ``setUp`` and the rest ``test_*`` methods).
    """

    def __SCREAMING_SNAKE_CASE ( self ):
        """Locate the helper scripts shipped inside accelerate.test_utils and
        stash their paths on the instance (read by the methods below)."""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_ops.py'''])

    @require_multi_gpu
    def __SCREAMING_SNAKE_CASE ( self ):
        """Run the basic test script on every visible GPU."""
        print(F'''Found {torch.cuda.device_count()} devices.''')
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            # The corrupted original passed the method object itself instead of
            # the command list; `cmd` is restored here.
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def __SCREAMING_SNAKE_CASE ( self ):
        """Run the collective-ops test script on every visible GPU."""
        print(F'''Found {torch.cuda.device_count()} devices.''')
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def __SCREAMING_SNAKE_CASE ( self ):
        """Re-run this very test file under torchrun."""
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def __SCREAMING_SNAKE_CASE ( self ):
        """Run the distributed data-loop script restricted to two devices."""
        print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1'''):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Distributed padding check executed by torchrun.  The corrupted original
    # bound every value to one repeated name, leaving `shape`, `tensor`,
    # `tensora`, `error_msg` and `index` undefined; the distinct bindings are
    # restored from their use sites.
    accelerator = Accelerator()
    # Each rank builds a tensor whose dim-0 length depends on its rank, so
    # padding across processes is observable.
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 462
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the MobileBERT model package.
#
# The corrupted original rebound the structure dict and both backend lists to
# the single name `lowerCamelCase`, so `_import_structure` (read on the last
# line) was undefined and the lazy module was never installed in
# `sys.modules`.  The canonical pattern is restored below.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # Keep `lowerCamelCase` bound for backward compatibility with the old name.
    lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = lowerCamelCase
| 462
| 1
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# NOTE(review): `a_` is immediately re-bound, so the logger created on the
# first line is discarded; nothing in the visible code reads either binding.
a_ = logging.get_logger(__name__)
a_ = '''T5Config'''
class __lowercase ( _UpperCAmelCase):
    """MT5 variant of the TF T5 base class: only overrides the two class
    attributes below.

    NOTE(review): `_A` is assigned twice, so only the second value (the config
    reference) survives on the class; the "mt5" string binding is lost.  The
    three classes in this module also all share the name ``__lowercase``, so
    only the last one remains importable.
    """
    _A : str = """mt5"""
    _A : List[Any] = MTaConfig
class __lowercase ( _UpperCAmelCase):
    """Second MT5 variant class (same attribute-override pattern as above).

    NOTE(review): `_A` is assigned twice (only the config reference survives),
    and this definition shadows the previous ``__lowercase`` at module level.
    """
    _A : int = """mt5"""
    _A : str = MTaConfig
class __lowercase ( _UpperCAmelCase):
    """Third MT5 variant class (same attribute-override pattern as above).

    NOTE(review): `_A` is assigned twice (only the config reference survives);
    this is the definition that ultimately owns the name ``__lowercase``.
    """
    _A : int = """mt5"""
    _A : Any = MTaConfig
| 480
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE__ ( args ):
    """Materialize a fine-pruned ("bertarized") checkpoint as a standalone
    standard checkpoint.

    Loads ``pytorch_model.bin`` from ``args.model_name_or_path``, applies the
    binarizer selected by ``args.pruning_method`` (with ``args.threshold``) to
    every prunable weight, and saves the result under
    ``args.target_model_path`` (defaulting to a sibling ``bertarized_*``
    directory).

    The corrupted original bound every intermediate to a throwaway name, so
    ``model``, ``mask`` and the output dict were all undefined; the canonical
    variable bindings are restored.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("""/""")
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}')
    model = torch.load(os.path.join(model_name_or_path, """pytorch_model.bin"""))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Never pruned: copied through unchanged.
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                # NOTE(review): third argument (sigmoid=True) restored from the
                # upstream movement-pruning script — confirm against emmental.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            else:
                raise ValueError("""Unknown pruning method""")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'bertarized_{os.path.basename(model_name_or_path)}')
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'\nCreated folder {target_model_path}')
    torch.save(pruned_model, os.path.join(target_model_path, """pytorch_model.bin"""))
    print("""\nPruned model saved! See you later!""")
if __name__ == "__main__":
    # The corrupted original bound the parser (and later the parsed args) to
    # `a_` while calling methods on the undefined `parser` and finally invoking
    # the undefined `main`; the conventional names are restored and the entry
    # point is the pruning function defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE__(args)
| 480
| 1
|
def _lowercase ( _UpperCAmelCase ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
lowerCamelCase =sorted(string.lower() )
return len(_UpperCAmelCase ) == len(set(_UpperCAmelCase ) )
if __name__ == "__main__":
    # The corrupted original bound the input and the result to one throwaway
    # name and called the undefined `is_isogram`; both bindings are restored
    # and the checker defined above (`_lowercase`) is invoked.
    input_str = input('''Enter a string ''').strip()
    isogram = _lowercase(input_str)
    print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 269
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __A ( a ):
    """Unit tests for ``FeaturesManager.determine_framework``.

    NOTE(review): in the corrupted original every method was named
    ``_snake_case``, so each definition shadowed the previous one and the
    fixtures/helpers referenced in the bodies (``self.test_model``,
    ``self._setup_pt_ckpt``, ``self._setup_tf_ckpt``) never existed.  The
    intended names are restored from those call sites (``setUp`` plus the two
    helpers) and the remaining tests are given distinct ``test_*`` names so
    unittest actually discovers them.
    """

    def setUp(self):
        # Shared fixtures read by every test below.
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""

    def _setup_pt_ckpt(self, save_dir):
        """Write a PyTorch checkpoint of the tiny test model into ``save_dir``."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        """Write a TensorFlow checkpoint (converted from PT) into ``save_dir``."""
        # NOTE(review): from_pt=True restored — the corrupted code passed the
        # save directory as the flag.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        result = FeaturesManager.determine_framework(self.test_model, framework)
        self.assertEqual(result, framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            result = FeaturesManager.determine_framework(local_pt_ckpt, framework)
            self.assertEqual(result, framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            result = FeaturesManager.determine_framework(local_tf_ckpt, framework)
            self.assertEqual(result, framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            # NOTE(review): the corrupted code asserted an undefined exception
            # type; EnvironmentError matches the upstream test — confirm.
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TF not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_torch_available""", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 269
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : str = TypeVar("""T""")
SCREAMING_SNAKE_CASE__ : int = TypeVar("""U""")
class lowerCamelCase_ ( Generic[T, U] ):
    """Doubly-linked-list node holding a key/value pair.

    NOTE(review): the original ``__init__`` repeated the same parameter name
    twice (a SyntaxError) and bound the arguments to a throwaway local instead
    of instance attributes, although ``__repr__`` reads ``self.key`` /
    ``self.val`` / ``self.next`` / ``self.prev``; restored the evidently
    intended assignments.
    """

    def __init__( self , key , val ):
        self.key = key
        self.val = val
        # Neighbour links; populated when the node is inserted into a list.
        self.next = None
        self.prev = None

    def __repr__( self ):
        """Debug representation showing the payload and link occupancy."""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class lowerCamelCase_ ( Generic[T, U] ):
    # NOTE(review): machine-mangled doubly-linked list (upstream:
    # DoubleLinkedList). ``__init__`` binds its sentinel nodes to a throwaway
    # ``__magic_name__`` local instead of ``self.head``/``self.rear``, calls an
    # undefined ``DoubleLinkedListNode`` name, and reads undefined
    # ``__lowerCAmelCase`` arguments; both methods below share the name ``A``
    # (the second shadows the first). Non-functional in this form.
    def __init__( self ):
        """Create an empty list delimited by head/rear sentinel nodes."""
        __magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ , __magic_name__ :Union[str, Any] = self.rear, self.head
    def __repr__( self ):
        """Render the list front-to-back, one node per line."""
        __magic_name__ :Any = ['''DoubleLinkedList''']
        __magic_name__ :Any = self.head
        while node.next is not None:
            rep.append(str(__lowerCAmelCase ) )
            __magic_name__ :Optional[int] = node.next
        rep.append(str(self.rear ) )
        return ",\n    ".join(__lowerCAmelCase )
    def A ( self , __lowerCAmelCase ):
        """Insert a node just before the rear sentinel (most-recent slot)."""
        __magic_name__ :List[Any] = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        __magic_name__ :str = node
        __magic_name__ :str = previous
        __magic_name__ :Dict = node
        __magic_name__ :Optional[Any] = self.rear
    def A ( self , __lowerCAmelCase ):
        """Unlink a node and return it; return None for sentinel nodes."""
        if node.prev is None or node.next is None:
            return None
        __magic_name__ :str = node.next
        __magic_name__ :Any = node.prev
        __magic_name__ :int = None
        __magic_name__ :List[str] = None
        return node
class lowerCamelCase_ ( Generic[T, U] ):
    # NOTE(review): machine-mangled LRU cache (upstream: LRUCache). ``__init__``
    # binds everything to a throwaway ``__magic_name__`` local instead of
    # ``self.list``/``self.capacity``/``self.hits``/``self.miss``/
    # ``self.num_keys``/``self.cache``; the get/put/decorator methods all share
    # the name ``A`` (each shadows the previous); and the class-level map is
    # named ``a__`` although the code reads
    # ``cls.decorator_function_to_instance_map``. Non-functional in this form.
    # Class-level map: decorated function -> backing cache instance.
    a__ = {}
    def __init__( self , __lowerCAmelCase ):
        """Create an LRU cache holding at most ``capacity`` entries."""
        __magic_name__ :DoubleLinkedList[T, U] = DoubleLinkedList()
        __magic_name__ :Dict = capacity
        __magic_name__ :Union[str, Any] = 0
        __magic_name__ :Dict = 0
        __magic_name__ :Optional[Any] = 0
        __magic_name__ :dict[T, DoubleLinkedListNode[T, U]] = {}
    def __repr__( self ):
        """Summarise hit/miss counters, capacity and current size."""
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )
    def __contains__( self , __lowerCAmelCase ):
        """Return True when the key currently has a cached value."""
        return key in self.cache
    def A ( self , __lowerCAmelCase ):
        """Return the cached value (bumping the entry to most-recent), or None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            __magic_name__ :DoubleLinkedListNode[T, U] = self.cache[key]
            __magic_name__ :int = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(__lowerCAmelCase )
            return node.val
        self.miss += 1
        return None
    def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Store a value, evicting the least-recently-used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                __magic_name__ :Union[str, Any] = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(__lowerCAmelCase ) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            __magic_name__ :List[Any] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            __magic_name__ :str = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            __magic_name__ :Any = value
            self.list.add(__lowerCAmelCase )
    @classmethod
    def A ( cls , __lowerCAmelCase = 1_2_8 ):
        """Decorator factory: memoise single-argument calls in a shared cache."""
        def cache_decorator_inner(__lowerCAmelCase ) -> Callable[..., U]:
            def cache_decorator_wrapper(*__lowerCAmelCase ) -> U:
                # Lazily create one cache per decorated function.
                if func not in cls.decorator_function_to_instance_map:
                    __magic_name__ :List[str] = LRUCache(__lowerCAmelCase )
                __magic_name__ :Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    # Cache miss: compute and remember the result.
                    __magic_name__ :Optional[Any] = func(*__lowerCAmelCase )
                    cls.decorator_function_to_instance_map[func].put(args[0] , __lowerCAmelCase )
                return result
            def cache_info() -> LRUCache[T, U]:
                # Expose the backing cache for inspection.
                return cls.decorator_function_to_instance_map[func]
            setattr(__lowerCAmelCase , '''cache_info''' , __lowerCAmelCase )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
# Run any doctests embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 0
|
'''simple docstring'''
import enum
import shutil
import sys
a__ , a__ : Any = shutil.get_terminal_size()
a__ : Optional[int] = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class __snake_case ( enum.Enum ):
    # NOTE(review): both members are bound to the same identifier; Enum forbids
    # reusing a member name, so this class raises TypeError at import time in
    # this form. Upstream these were presumably two distinct direction values.
    __lowerCAmelCase = 0
    __lowerCAmelCase = 1
def __lowerCamelCase ( content , end="" ) ->Optional[Any]:
    """Write ``content`` (followed by ``end``) to stdout and flush immediately."""
    # NOTE(review): the original signature repeated one parameter name twice
    # (a SyntaxError) while the body referenced an undefined ``end``; the
    # evident intent was (content, end="").
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def __lowerCamelCase ( content , color , end="" ) ->List[str]:
    """Write ``content`` in the given ANSI ``color`` code, followed by ``end``."""
    # NOTE(review): original repeated a parameter name three times (SyntaxError)
    # and referenced undefined ``color``/``content``; restored the evident
    # (content, color, end="") signature.
    # NOTE(review): ``forceWrite`` does not exist under that name in this file
    # as written (every helper here is named ``__lowerCamelCase``) — confirm
    # against the upstream module.
    forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , end )
def __lowerCamelCase ( ) ->Optional[Any]:
    # Intent: return the cursor to the start of the current line.
    # NOTE(review): ``forceWrite`` does not exist under that name in this file
    # as written (every helper is named ``__lowerCamelCase``) — NameError when
    # called; confirm against the upstream module.
    forceWrite('\r' )
def __lowerCamelCase ( num_lines , direction ) ->Any:
    """Move the terminal cursor ``num_lines`` in the given ``direction``."""
    # NOTE(review): original signature duplicated one parameter name
    # (SyntaxError); restored (num_lines, direction). ``forceWrite`` and
    # ``CURSOR_TO_CHAR`` are not defined under those names in this file as
    # written — confirm against the upstream module.
    forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def __lowerCamelCase ( ) ->str:
    # Intent: blank out the current terminal line, then return to column 0.
    # NOTE(review): ``forceWrite``/``TERMINAL_WIDTH``/``reset_cursor`` are not
    # defined under these names in this file as written — confirm upstream.
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()
def __lowerCamelCase ( ) ->Tuple:
    # Intent: draw a horizontal rule across the full terminal width.
    # NOTE(review): relies on ``reset_cursor``/``forceWrite``/``TERMINAL_WIDTH``,
    # none of which exist under those names in this file as written.
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
| 368
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def a ( __a ) -> int:
    """Build a ``SwinConfig`` for a SimMIM checkpoint (image size 192).

    ``__a`` is the model name; only "base" and "large" variants are supported.
    NOTE(review): the original body stored every value in the same throwaway
    local and then read undefined names (``model_name``, ``window_size``,
    ``embed_dim`` ...); restored the evident intent of populating the config.
    """
    config = SwinConfig(image_size=192 )
    if "base" in __a:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in __a:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def a ( __a ) -> Optional[int]:
'''simple docstring'''
if "encoder.mask_token" in name:
UpperCamelCase__ :Union[str, Any] = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
UpperCamelCase__ :Any = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
UpperCamelCase__ :Any = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
UpperCamelCase__ :Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase__ :Tuple = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase__ :Any = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase__ :Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase__ :int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase__ :Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
UpperCamelCase__ :int = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCamelCase__ :str = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCamelCase__ :List[Any] = "swin." + name
return name
def a ( __a , __a ) -> Tuple:
    '''Rewrite an original SimMIM state dict into the HF Swin layout.

    NOTE(review): non-functional as written — the signature repeats ``__a``
    (SyntaxError); locals were collapsed onto ``UpperCamelCase__`` so
    ``key_split``/``layer_num``/``block_num``/``dim``/``val`` are undefined;
    and the split q/k/v slices are never written back under their target state
    dict keys. Compare against the upstream conversion script before use.
    '''
    for key in orig_state_dict.copy().keys():
        UpperCamelCase__ :List[Any] = orig_state_dict.pop(__a )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            UpperCamelCase__ :Union[str, Any] = key.split('''.''' )
            UpperCamelCase__ :List[str] = int(key_split[2] )
            UpperCamelCase__ :Union[str, Any] = int(key_split[4] )
            UpperCamelCase__ :List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # q / k / v weights are stacked along dim 0 of the fused qkv tensor.
                UpperCamelCase__ :List[Any] = val[:dim, :]
                UpperCamelCase__ :Optional[int] = val[
                    dim : dim * 2, :
                ]
                UpperCamelCase__ :Any = val[-dim:, :]
            else:
                # Bias vectors are split the same way along dim 0.
                UpperCamelCase__ :Optional[int] = val[
                    :dim
                ]
                UpperCamelCase__ :List[Any] = val[
                    dim : dim * 2
                ]
                UpperCamelCase__ :Any = val[
                    -dim:
                ]
        else:
            UpperCamelCase__ :str = val
    return orig_state_dict
def a ( __a , __a , __a , __a ) -> List[Any]:
    '''Convert a SimMIM .pth checkpoint into an HF SwinForMaskedImageModeling.

    NOTE(review): non-functional as written — the signature repeats ``__a``
    four times (SyntaxError); the body reads undefined names (``model_name``,
    ``pytorch_dump_folder_path``, ``push_to_hub``, ``outputs`` ...); and the
    helpers it should call (``get_swin_config``/``convert_state_dict``) are
    both named ``a`` in this file. The verification image is COCO val2017
    000000039769 processed at 192x192.
    '''
    UpperCamelCase__ :str = torch.load(__a , map_location='''cpu''' )["model"]
    UpperCamelCase__ :Union[str, Any] = get_swin_config(__a )
    UpperCamelCase__ :Optional[Any] = SwinForMaskedImageModeling(__a )
    model.eval()
    UpperCamelCase__ :List[str] = convert_state_dict(__a , __a )
    model.load_state_dict(__a )
    UpperCamelCase__ :Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCamelCase__ :List[str] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    UpperCamelCase__ :int = Image.open(requests.get(__a , stream=__a ).raw )
    UpperCamelCase__ :int = image_processor(images=__a , return_tensors='''pt''' )
    with torch.no_grad():
        UpperCamelCase__ :str = model(**__a ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__a )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__a )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
# CLI entry point for the Swin SimMIM conversion script.
# NOTE(review): the parser is bound to ``__snake_case`` while ``parser`` and
# ``args`` are read, and ``convert_swin_checkpoint`` does not exist under that
# name in this file as written (the function above is named ``a``).
if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''swin-base-simmim-window6-192''',
        type=str,
        choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
        help='''Name of the Swin SimMIM model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
        type=str,
        help='''Path to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    __snake_case = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( A__ , A__ ):
    """Variance-preserving SDE scheduler (score_sde_pytorch port).

    NOTE(review): machine-mangled — ``__init__`` and the first method repeat
    the parameter name ``UpperCamelCase_`` (SyntaxError), every assignment
    binds a throwaway ``UpperCamelCase__`` local instead of ``self.timesteps``
    etc., the two methods named ``lowerCAmelCase__`` shadow each other, and
    the step body reads undefined ``t``/``score``/``x``/``std``/``dt`` names.
    Compare with the upstream ScoreSdeVpScheduler before use.
    """
    _a = 1
    @register_to_config
    def __init__( self , UpperCamelCase_=2000 , UpperCamelCase_=0.1 , UpperCamelCase_=20 , UpperCamelCase_=1e-3 ):
        '''Store config via register_to_config; state tensors start unset.'''
        UpperCamelCase__ :Dict = None
        UpperCamelCase__ :Any = None
        UpperCamelCase__ :List[str] = None
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        '''Create the descending continuous timestep grid for sampling.'''
        UpperCamelCase__ :Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase_ , device=UpperCamelCase_ )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
        '''One reverse-SDE predictor step: drift + diffusion + fresh noise.'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        UpperCamelCase__ :Optional[int] = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        UpperCamelCase__ :Tuple = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        UpperCamelCase__ :Tuple = std.flatten()
        while len(std.shape ) < len(score.shape ):
            UpperCamelCase__ :int = std.unsqueeze(-1 )
        UpperCamelCase__ :List[Any] = -score / std
        # compute
        UpperCamelCase__ :List[str] = -1.0 / len(self.timesteps )
        UpperCamelCase__ :Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        UpperCamelCase__ :Any = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            UpperCamelCase__ :Any = beta_t.unsqueeze(-1 )
        UpperCamelCase__ :Optional[int] = -0.5 * beta_t * x
        UpperCamelCase__ :int = torch.sqrt(UpperCamelCase_ )
        UpperCamelCase__ :List[str] = drift - diffusion**2 * score
        UpperCamelCase__ :Dict = x + drift * dt
        # add noise
        UpperCamelCase__ :List[Any] = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase_ , device=x.device , dtype=x.dtype )
        UpperCamelCase__ :int = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self ):
        '''Number of training timesteps from the stored config.'''
        return self.config.num_train_timesteps
| 280
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the ViT-MAE model family.
# NOTE(review): machine-mangled — ``_lowercase`` is reused for the config map
# and both model-name lists (each assignment clobbers the previous), and the
# ``_import_structure`` passed to _LazyModule at the bottom is never defined
# under that name, so this module cannot import in this form.
_lowercase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        """VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMAEForPreTraining""",
        """ViTMAELayer""",
        """ViTMAEModel""",
        """ViTMAEPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        """TFViTMAEForPreTraining""",
        """TFViTMAEModel""",
        """TFViTMAEPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    # At runtime, defer the heavy imports until attribute access.
    import sys
    _lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
    '''Command-line arguments controlling SQuAD preprocessing.

    NOTE(review): machine-mangled — every field is named ``_lowercase``, so
    each annotation overwrites the previous one and the dataclass ends up with
    a single field. Upstream these were distinct options (model_type,
    data_dir, max_seq_length, doc_stride, max_query_length, max_answer_length,
    overwrite_cache, version_2_with_negative, null_score_diff_threshold,
    n_best_size, lang_id, threads) — see each ``metadata['help']`` below.
    '''
    _lowercase : str = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
    _lowercase : str = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    _lowercase : int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    _lowercase : int = field(
        default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
    _lowercase : int = field(
        default=6_4 , metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        } , )
    _lowercase : int = field(
        default=3_0 , metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        } , )
    _lowercase : bool = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    _lowercase : bool = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    _lowercase : float = field(
        default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    _lowercase : int = field(
        default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    _lowercase : int = field(
        default=0 , metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        } , )
    _lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
    '''Dataset split selector (train / dev).

    NOTE(review): machine-mangled — both attributes are named ``_lowercase``
    (the second shadows the first), and underscore-prefixed names are not
    valid Enum members anyway; upstream these were presumably
    ``train = "train"`` and ``dev = "dev"``.
    '''
    _lowercase : str = '''train'''
    _lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
    '''PyTorch Dataset wrapping SQuAD features, with on-disk feature caching.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``_lowercase`` (SyntaxError); locals were collapsed onto ``_lowerCAmelCase``
    so ``self.args``/``self.processor``/``self.mode``/``self.features``/
    ``self.dataset``/``self.examples``/``self.old_features`` are read but never
    assigned; and the processor branch picks the same ``SquadVaProcessor``
    either way (upstream: SquadV2Processor vs SquadV1Processor).
    '''
    _lowercase : SquadDataTrainingArguments
    _lowercase : List[SquadFeatures]
    _lowercase : Split
    _lowercase : bool
    def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
        """Load features from the cache file when present, else build and cache them."""
        _lowerCAmelCase = args
        _lowerCAmelCase = is_language_sensitive
        _lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(_lowercase , _lowercase ):
            try:
                _lowerCAmelCase = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        _lowerCAmelCase = mode
        # Load data features from cache or dataset file
        _lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
        _lowerCAmelCase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _lowerCAmelCase = cached_features_file + """.lock"""
        with FileLock(_lowercase ):
            if os.path.exists(_lowercase ) and not args.overwrite_cache:
                _lowerCAmelCase = time.time()
                _lowerCAmelCase = torch.load(_lowercase )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                _lowerCAmelCase = self.old_features["""features"""]
                _lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
                _lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        """ future run""" )
            else:
                if mode == Split.dev:
                    _lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
                else:
                    _lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
                _lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
                _lowerCAmelCase = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ):
        """Number of preprocessed features."""
        return len(self.features )
    def __getitem__( self , _lowercase ):
        """Assemble model inputs (and labels when training) for one feature.

        NOTE(review): reads undefined locals (``feature``, ``i``, ``input_ids``
        ...) because the assignments above bind ``_lowerCAmelCase`` instead.
        """
        _lowerCAmelCase = self.features[i]
        _lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
        _lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
        _lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
        _lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
        _lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
        _lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
        _lowerCAmelCase = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"""is_impossible""": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            _lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
            _lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
        return inputs
| 5
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __magic_name__ :
    # NOTE(review): machine-mangled model-tester helper (upstream:
    # TFDPRModelTester). ``__init__`` repeats the parameter name ``snake_case_``
    # for every argument (SyntaxError) and binds each value to a throwaway
    # ``lowercase`` local instead of ``self.*``, so the attribute reads in the
    # methods below cannot work in this form. The ``_A`` methods all shadow
    # one another (only the last survives on the class).
    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=0 , ):
        lowercase =parent
        lowercase =batch_size
        lowercase =seq_length
        lowercase =is_training
        lowercase =use_input_mask
        lowercase =use_token_type_ids
        lowercase =use_labels
        lowercase =vocab_size
        lowercase =hidden_size
        lowercase =num_hidden_layers
        lowercase =num_attention_heads
        lowercase =intermediate_size
        lowercase =hidden_act
        lowercase =hidden_dropout_prob
        lowercase =attention_probs_dropout_prob
        lowercase =max_position_embeddings
        lowercase =type_vocab_size
        lowercase =type_sequence_label_size
        lowercase =initializer_range
        lowercase =num_labels
        lowercase =num_choices
        lowercase =scope
        lowercase =projection_dim
    def _A( self ):
        # Build a random BertConfig-backed DPRConfig plus random input tensors.
        lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase =None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            lowercase =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase =None
        if self.use_token_type_ids:
            lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase =None
        lowercase =None
        lowercase =None
        if self.use_labels:
            lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase =ids_tensor([self.batch_size] , self.num_choices )
        lowercase =BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
        lowercase =DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Check the context encoder's pooled-output shape.
        lowercase =TFDPRContextEncoder(config=snake_case_ )
        lowercase =model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
        lowercase =model(snake_case_ , token_type_ids=snake_case_ )
        lowercase =model(snake_case_ )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Check the question encoder's pooled-output shape.
        lowercase =TFDPRQuestionEncoder(config=snake_case_ )
        lowercase =model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
        lowercase =model(snake_case_ , token_type_ids=snake_case_ )
        lowercase =model(snake_case_ )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Check the reader's start/end/relevance logits shapes.
        lowercase =TFDPRReader(config=snake_case_ )
        lowercase =model(snake_case_ , attention_mask=snake_case_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def _A( self ):
        # Repack prepare_config_and_inputs() output into the common-test format.
        lowercase =self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) =config_and_inputs
        lowercase ={'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): machine-mangled test case (upstream: TFDPRModelTest). The
    # mixin bases ``__SCREAMING_SNAKE_CASE`` and ``TFDPRModelTester`` (used in
    # setUp) are not defined in this file as written; every class attribute is
    # named ``UpperCamelCase__`` (each assignment clobbers the previous, so
    # ``all_model_classes``/``pipeline_model_mapping`` etc. are lost) and every
    # test method is named ``_A`` (only the last survives).
    UpperCamelCase__ = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def _A( self ):
        # setUp: model tester + config tester.
        lowercase =TFDPRModelTester(self )
        lowercase =ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
    def _A( self ):
        self.config_tester.run_common_tests()
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*snake_case_ )
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*snake_case_ )
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*snake_case_ )
    @slow
    def _A( self ):
        # Smoke-test from_pretrained for each DPR checkpoint family.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase =TFDPRContextEncoder.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase =TFDPRContextEncoder.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase =TFDPRQuestionEncoder.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase =TFDPRReader.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
    # NOTE(review): integration test comparing a DPR question-encoder embedding
    # slice against reference values. Machine-mangled: the model and outputs
    # are bound to a throwaway ``lowercase`` local, so ``model``/``output``/
    # ``expected_slice``/``snake_case_`` are undefined as written.
    @slow
    def _A( self ):
        lowercase =TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        lowercase =tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
        lowercase =model(snake_case_ )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        lowercase =tf.constant(
            [
                [
                    0.03_23_62_53,
                    0.12_75_33_35,
                    0.16_81_85_09,
                    0.00_27_97_86,
                    0.3_89_69_33,
                    0.24_26_49_45,
                    0.2_17_89_71,
                    -0.02_33_52_27,
                    -0.08_48_19_59,
                    -0.14_32_41_17,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 145
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
    '''Download up to ``num_class_images`` LAION images matching a prompt.

    NOTE(review): non-functional as written — the signature repeats
    ``lowercase_`` three times (SyntaxError) and the body reads undefined
    names (``factor``, ``num_class_images``, ``class_data_dir``, ``client``,
    ``num_images``, ``class_images``, ``count``, ``total``, ``img``,
    ``images``, ``pbar``); every assignment was collapsed onto ``lowercase``.
    '''
    lowercase =1.5
    lowercase =int(factor * num_class_images )
    lowercase =ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowercase_ , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=lowercase_ )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        lowercase =client.query(text=lowercase_ )
        if len(lowercase_ ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            # Not enough hits: widen the query and retry.
            lowercase =int(factor * num_images )
            lowercase =ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowercase_ , aesthetic_weight=0.1 , )
    lowercase =0
    lowercase =0
    lowercase =tqdm(desc='''downloading real regularization images''' , total=lowercase_ )
    with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
        f'{class_data_dir}/images.txt' , '''w''' ) as fa:
        while total < num_class_images:
            lowercase =class_images[count]
            count += 1
            try:
                lowercase =requests.get(images['''url'''] )
                if img.status_code == 2_0_0:
                    lowercase =Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa.write(images['''url'''] + '''\n''' )
                    fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best-effort download loop: skip any failing image.
                continue
    return
def UpperCamelCase() -> argparse.Namespace:
    """Parse the CLI arguments for the class-image retrieval script.

    NOTE(review): the source never bound the parser (``lowercase = ...``) and
    used the unresolved name ``lowercase_`` for ``add_help``/``required``/
    ``type``; restored to the conventional values (help disabled, prompt and
    data dir required strings, image count an int defaulting to 200).
    """
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): broken entry point — both functions above were renamed to
    # `UpperCamelCase` (the second shadows the first), so `parse_args` and
    # `retrieve` are unresolved, and the result is bound to `_UpperCAmelCase`
    # while the next line reads `args`. Raises NameError as written.
    _UpperCAmelCase : Optional[int] = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 145
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_lowerCAmelCase : Any =TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
    """LRU cache: a deque of keys (most recent on the left) plus a set for
    O(1) membership tests.

    NOTE(review): in the source every attribute was bound to mangled locals
    and both methods shared one mangled name (the second silently shadowed
    the first). Attribute and method names are restored to match the usage
    in this file's ``__main__`` block (``refer``/``display``/``dq_store``/
    ``key_reference``/``_MAX_CAPACITY``).
    """

    dq_store: deque  # Cache store of keys, most recently used first
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache (class-level default)

    def __init__(self, n: int):
        """Create a cache of capacity *n*; falsy *n* means unbounded,
        negative *n* raises ValueError."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            self._MAX_CAPACITY = n

    def refer(self, x) -> None:
        """Record an access to key *x*, evicting the least recently used key
        when the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store ) == self._MAX_CAPACITY:
                # Evict the least recently used key (rightmost).
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            # Already cached: move to the most-recent position.
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display(self) -> None:
        """Print cached keys, most recently used first."""
        for k in self.dq_store:
            print(k )

    def __repr__(self) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): broken demo — the class above is named `__UpperCamelCase`
    # and its methods were mangled, so `LRUCache`, `lru_cache`, `.refer` and
    # `.display` are unresolved here. Raises NameError as written.
    _lowerCAmelCase : LRUCache[str | int] =LRUCache(4)
    lru_cache.refer("""A""")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("""A""")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 113
|
def _A ( SCREAMING_SNAKE_CASE ):
    """Recursively convert a non-negative integer to its binary digit string.

    NOTE(review): the source bound the converted value to a mangled local and
    recursed through the unresolved name ``binary_recursive``; restored to a
    self-recursive call.
    """
    decimal = int(SCREAMING_SNAKE_CASE )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    # Most significant digits first, current remainder last.
    return _A(div ) + str(mod )
def _A ( SCREAMING_SNAKE_CASE ):
    """Convert an integer (or integer-like string) to a ``0b``-prefixed
    binary string, preserving the sign.

    Raises:
        ValueError: on empty input or non-numeric input.

    NOTE(review): locals were mangled in the source and the recursive helper
    name was unresolved; the helper is nested here so the function is
    self-contained.
    """

    def _to_binary(decimal: int) -> str:
        # Recursive magnitude-to-binary conversion.
        if decimal in (0, 1):
            return str(decimal )
        div, mod = divmod(decimal , 2 )
        return _to_binary(div ) + str(mod )

    number = str(SCREAMING_SNAKE_CASE ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f"{negative}0b{_to_binary(int(number ) )}"
if __name__ == "__main__":
    # Run the module's doctests (none are currently defined in this chunk).
    from doctest import testmod
    testmod()
| 113
| 1
|
from __future__ import annotations
from PIL import Image
# Define glider example
# NOTE(review): both example grids below are bound to the same mangled name,
# so the blinker assignment overwrites the glider, and code expecting
# `GLIDER`/`BLINKER` cannot resolve them.
SCREAMING_SNAKE_CASE__ = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
SCREAMING_SNAKE_CASE__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def A ( cells ) -> list[list[int]]:
    """Return the next Game of Life generation for *cells* (0 dead, 1 alive).

    NOTE(review): locals and the parameter were mangled in the source
    (``A__`` targets, unresolved ``lowerCamelCase__``); names restored.
    """
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours, guarding the grid borders.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def A ( cells , frames ) -> list[Image.Image]:
    """Run *frames* generations, rendering each grid to a greyscale PIL image.

    NOTE(review): the source declared two identically named parameters (a
    SyntaxError) and mangled the pixel-buffer writes; restored. The call to
    ``new_generation`` below matches the source text, but the sibling
    function above is named ``A`` — confirm the intended helper name.
    """
    images = []
    for _ in range(frames ):
        # Create output image (width = columns, height = rows).
        img = Image.new('RGB' , (len(cells[0] ), len(cells )) )
        pixels = img.load()

        # Save cells to image: live cells are black, dead cells white.
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    # NOTE(review): broken demo — the functions above are both named `A` and
    # both example grids were bound to one mangled name, so `generate_images`,
    # `GLIDER` and `images` are unresolved. Raises NameError as written.
    SCREAMING_SNAKE_CASE__ = generate_images(GLIDER, 1_6)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 720
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( model_name_or_path ) -> str | None:
    """Infer the RAG model type from a checkpoint name.

    Returns "rag_token", "rag_sequence", "bart", or None when no marker
    substring is present. (Only the parameter name was restored — the source
    read ``model_name_or_path`` but named the parameter ``__UpperCamelCase``.)
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def A ( metric_fn , prediction , ground_truths ):
    """Return the best metric value of *prediction* against any ground truth.

    NOTE(review): the source declared three identically named parameters (a
    SyntaxError); names restored per the canonical eval_rag helper
    ``metric_max_over_ground_truths``.
    """
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def A ( args , preds_path , gold_data_path ):
    """Compute and log exact-match and F1 of predictions against gold answers.

    In "qa" mode the gold file is a TSV of question / answer-list pairs; in
    any other mode each gold line is a single reference answer.

    NOTE(review): locals and parameters were mangled in the source; names
    restored per the canonical eval_rag script. ``exact_match_score`` /
    ``fa_score`` come from the ``utils_rag`` import at the top of the file,
    and ``metric_max_over_ground_truths`` is the (mangled) helper above.
    """
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='\t' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'''F1: {fa:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def A ( args , preds_path , gold_data_path ):
    """Compute and log precision@k of tab-separated provenance predictions.

    Each line of both files holds tab-separated document titles; precision is
    the per-question overlap of the top-k hypotheses with the references.

    NOTE(review): parameters/locals restored from the mangled source per the
    canonical eval_rag script.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]

    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('\t' )[:k] )
        ref_provenance = set(reference.split('\t' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k

    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
def A ( args , rag_model , questions ):
    """Retrieve documents for *questions* and return one tab-joined string of
    (de-quoted) document titles per question.

    NOTE(review): parameters/locals restored from the mangled source per the
    canonical eval_rag script; ``torch.floataa`` was restored to
    ``torch.float32`` — confirm against the original.
    """

    def strip_title(title ):
        # Titles come back wrapped in literal double quotes; strip them.
        if title.startswith('"' ):
            title = title[1:]
        if title.endswith('"' ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='pt' , padding=True , truncation=True , )['input_ids'].to(args.device )

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance ) )
    return provenance_strings
def A ( args , rag_model , questions ):
    """Generate answers for a batch of *questions* with *rag_model*.

    NOTE(review): parameters/locals restored from the mangled source per the
    canonical eval_rag script (``padding``/``truncation``/``skip_special_tokens``
    restored to True, ``early_stopping`` to False — confirm against original).
    """
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='pt' , padding=True , truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )

        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('Q: {} - A: {}'.format(q , a ) )

        return answers
def A ( ):
    """Build and parse the CLI arguments for the RAG evaluation script.

    Also resolves ``args.device`` once so downstream helpers can move tensors
    onto it. NOTE(review): the source never bound ``parser``'s results and
    used the unresolved name ``__UpperCamelCase`` for every ``type``/
    ``default``/``required`` value; restored per the canonical eval_rag
    script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=str , help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ) , )
    parser.add_argument(
        '--index_name' , default=None , choices=['exact', 'compressed', 'legacy'] , type=str , help='RAG model retriever type' , )
    parser.add_argument(
        '--index_path' , default=None , type=str , help='Path to the retrieval index' , )
    parser.add_argument('--n_docs' , default=5 , type=int , help='Number of retrieved docs' )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=str , help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ) , )
    parser.add_argument('--k' , default=1 , type=int , help='k for the precision@k calculation' )
    parser.add_argument(
        '--evaluation_set' , default=None , type=str , required=True , help='Path to a file containing evaluation samples' , )
    parser.add_argument(
        '--gold_data_path' , default=None , type=str , required=True , help='Path to a tab-separated file with gold samples' , )
    parser.add_argument(
        '--gold_data_mode' , default='qa' , type=str , choices=['qa', 'ans'] , help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ) , )
    parser.add_argument(
        '--predictions_path' , type=str , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
    parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
    parser.add_argument(
        '--eval_batch_size' , default=8 , type=int , help='Batch size per GPU/CPU for evaluation.' , )
    parser.add_argument(
        '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
    parser.add_argument(
        '--num_beams' , default=4 , type=int , help='Number of beams to be used when generating answers' , )
    parser.add_argument('--min_length' , default=1 , type=int , help='Min length of the generated answers' )
    parser.add_argument('--max_length' , default=50 , type=int , help='Max length of the generated answers' )

    parser.add_argument(
        '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
    parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    return args
def A ( args ):
    """Run RAG/BART evaluation over one or more checkpoints.

    NOTE(review): locals restored from the mangled source per the canonical
    eval_rag script. The helper names referenced here (``infer_model_type``,
    ``get_scores``, ``get_precision_at_k``, ``evaluate_batch_e2e``,
    ``evaluate_batch_retrieval``) match the source's own references, but the
    corresponding defs above were all renamed to ``A`` — those defs need the
    same restoration for this to resolve.
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('rag' ):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    # Evaluate every checkpoint directory under model_name_or_path, or just
    # the single given checkpoint.
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info('Evaluate the following checkpoints: %s' , checkpoints )

    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue

        logger.info('***** Running evaluation for {} *****'.format(checkpoint ) )
        logger.info(' Batch size = %d' , args.eval_batch_size )
        logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )

        if args.model_type.startswith('rag' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )

        with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('\n'.join(answers ) + '\n' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('\n'.join(answers ) )
                preds_file.flush()

            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # NOTE(review): broken entry point — the functions above are all named
    # `A` (mutually shadowing), so `get_args` and `main` are unresolved, and
    # the parsed args are bound to a mangled name while the next line reads
    # `args`. Raises NameError as written.
    SCREAMING_SNAKE_CASE__ = get_args()
    main(args)
| 52
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __a ( base_model_name, hf_config, downstream_dict ):
    """Build a UniSpeechSatForSequenceClassification model and load the S3PRL
    classification-head weights from *downstream_dict*.

    NOTE(review): the source declared three identically named parameters (a
    SyntaxError) and collapsed every attribute target to `_a`; restored per
    the transformers S3PRL conversion script.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def __a ( base_model_name, hf_config, downstream_dict ):
    """Build a UniSpeechSatForAudioFrameClassification model and load the
    S3PRL diarization-head weights from *downstream_dict*.

    NOTE(review): parameters/attribute targets restored from the mangled
    source per the transformers S3PRL conversion script.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def __a ( base_model_name, hf_config, downstream_dict ):
    """Build a UniSpeechSatForXVector model and load the S3PRL x-vector head
    weights (projector, TDNN layers, feature extractor, classifier,
    objective) from *downstream_dict*.

    NOTE(review): parameters/attribute targets restored from the mangled
    source per the transformers S3PRL conversion script.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def __a ( base_model_name, config_path, checkpoint_path, model_dump_path ):
    """Convert an S3PRL UniSpeechSat checkpoint into a HF model + feature
    extractor and save both to *model_dump_path*.

    NOTE(review): parameters/locals restored from the mangled source per the
    transformers S3PRL conversion script. The helper calls below match the
    source's own references, but the defs above were all renamed to `__a` —
    they need the same restoration for this to resolve.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict )
    else:
        raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    # NOTE(review): broken entry point — the parser and parsed args are both
    # bound to `__SCREAMING_SNAKE_CASE`, so `parser` and `args` are
    # unresolved, and the conversion functions above are all named `__a`
    # rather than `convert_saprl_checkpoint`. Raises NameError as written.
    __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
    )
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
    parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    __SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 388
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
    """Depth-estimation pipeline: maps an image (or list of images) to a
    predicted depth tensor plus a greyscale PIL depth image.

    NOTE(review): in the source the decorator argument and base class were
    the unresolved name `_SCREAMING_SNAKE_CASE`, and all four methods shared
    one mangled name (mutually shadowing). Restored per the transformers
    DepthEstimationPipeline (decorator arg/base/method names/interpolation
    flags) — confirm against the original.
    """

    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )

    def __call__(self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )

    def _sanitize_parameters(self , **kwargs ):
        # No preprocess/forward/postprocess parameters are supported.
        return {}, {}, {}

    def preprocess(self , image ):
        image = load_image(image )
        # Remember the original (width, height) so postprocess can upscale
        # the predicted depth map back to input resolution.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        # image.size is (width, height); interpolate expects (height, width).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 388
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCAmelCase_ ( __lowercase ):
    """Minimal kwargs-handler dataclass used by the kwargs tests in this file.

    NOTE(review): the source bound all three fields to a single mangled name;
    field names/types restored from their use in the sibling test
    (``MockClass(a=2, b=True, c=2.25)`` with defaults 0 / False / 3.0). The
    base name `__lowercase` is unresolved here — presumably the imported
    ``KwargsHandler``; confirm.
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for accelerate kwargs handlers.

    NOTE(review): all three test methods share one mangled name, so only the
    last survives on the class; `MockClass`, `scaler_handler`, `accelerator`,
    `scaler` and `__lowerCAmelCase` are unresolved (locals were collapsed to
    `lowerCAmelCase`). Raises NameError as written.
    """

    # Intended: KwargsHandler.to_kwargs() returns only non-default fields.
    def __UpperCAmelCase ( self : Tuple ) -> Dict:
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=__lowerCAmelCase ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )

    # Intended: GradScalerKwargs values are applied to the fp16 GradScaler.
    @require_cuda
    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        lowerCAmelCase = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
        AcceleratorState._reset_state()
        lowerCAmelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        lowerCAmelCase = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1_024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled , __lowerCAmelCase )

    # Intended: launch this file under torchrun to exercise the DDP kwargs
    # checks in the __main__ block below.
    @require_multi_gpu
    def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
        lowerCAmelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    # NOTE(review): every binding below was collapsed to `__snake_case`, so
    # `ddp_scaler`, `accelerator`, `model`, `error_msg` and
    # `observed_bucket_cap_map` are unresolved. Raises NameError as written.
    __snake_case =DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    __snake_case =Accelerator(kwargs_handlers=[ddp_scaler])
    __snake_case =torch.nn.Linear(100, 200)
    __snake_case =accelerator.prepare(model)
    # Check the values changed in kwargs
    __snake_case =""""""
    __snake_case =model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 707
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES constants are all bound to the same
# mangled name `__snake_case`, so each assignment overwrites the previous one
# and the class below cannot resolve them.
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """spiece.model"""}
__snake_case ={
    """vocab_file""": {
        """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
        ),
    }
}
__snake_case ={
    """google/bigbird-roberta-base""": 4_096,
    """google/bigbird-roberta-large""": 4_096,
    """google/bigbird-base-trivia-itc""": 4_096,
}
class UpperCAmelCase_ ( __lowercase ):
    """SentencePiece-based BigBird tokenizer.

    NOTE(review): heavily mangled. The base name `__lowercase` (presumably
    PreTrainedTokenizer), the class-level constants, and the positional
    parameters of ``__init__`` are unresolvable — ``__init__`` even declares
    several parameters with the identical name ``UpperCAmelCase__``, which is
    a SyntaxError in Python. Documented as-is; the code below needs its
    identifiers restored before it can run.
    """

    lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
    lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
    lowerCamelCase : List[int] = []

    # Intended roles of the duplicate-named parameters (from the defaults):
    # vocab_file, unk="<unk>", bos="<s>", eos="</s>", pad="<pad>",
    # sep="[SEP]", mask="[MASK]", cls="[CLS]", sp_model_kwargs=None.
    def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Union[str, Any]="<s>" , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : List[Any]="<pad>" , UpperCAmelCase__ : Any="[SEP]" , UpperCAmelCase__ : List[Any]="[MASK]" , UpperCAmelCase__ : Optional[int]="[CLS]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Tuple , ) -> None:
        # Wrap plain-string special tokens as AddedToken (no strip on either side).
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token

        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )

        lowerCAmelCase = vocab_file
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCAmelCase__ )

    # Intended: vocab_size property.
    @property
    def __UpperCAmelCase ( self : List[str] ) -> Tuple:
        return self.sp_model.get_piece_size()

    # Intended: get_vocab — id->token map merged with added tokens.
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Drop the (unpicklable) SentencePiece processor when pickling.
    def __getstate__( self : Optional[int] ) -> Optional[int]:
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state

    # Rebuild the SentencePiece processor after unpickling.
    def __setstate__( self : int , UpperCAmelCase__ : List[Any] ) -> Dict:
        lowerCAmelCase = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            lowerCAmelCase = {}

        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    # Intended: _tokenize via SentencePiece.
    def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
        return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )

    # Intended: _convert_token_to_id.
    def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> Dict:
        return self.sp_model.piece_to_id(UpperCAmelCase__ )

    # Intended: _convert_id_to_token.
    def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int ) -> Optional[int]:
        lowerCAmelCase = self.sp_model.IdToPiece(UpperCAmelCase__ )
        return token

    # Intended: convert_tokens_to_string — decode runs of ordinary pieces,
    # passing special tokens through verbatim.
    def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[str] ) -> Any:
        lowerCAmelCase = []
        lowerCAmelCase = ''
        lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
                lowerCAmelCase = True
                lowerCAmelCase = []
            else:
                current_sub_tokens.append(UpperCAmelCase__ )
                lowerCAmelCase = False
        out_string += self.sp_model.decode(UpperCAmelCase__ )
        return out_string.strip()

    # Intended: _decode — mirrors the Rust tokenizer's spacing rules.
    def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : int , ) -> str:
        lowerCAmelCase = kwargs.pop('use_source_tokenizer' , UpperCAmelCase__ )

        lowerCAmelCase = self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCAmelCase = []
        lowerCAmelCase = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
                    lowerCAmelCase = []
                sub_texts.append(UpperCAmelCase__ )
            else:
                current_sub_text.append(UpperCAmelCase__ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            lowerCAmelCase = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(UpperCAmelCase__ ) )
        else:
            lowerCAmelCase = ''.join(UpperCAmelCase__ )

        lowerCAmelCase = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCAmelCase = self.clean_up_tokenization(UpperCAmelCase__ )
            return clean_text
        else:
            return text

    # Intended: save_vocabulary — copy (or serialize) the SentencePiece model.
    def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(UpperCAmelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCAmelCase = os.path.join(
            UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCAmelCase__ , 'wb' ) as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase__ )

        return (out_vocab_file,)

    # Intended: build_inputs_with_special_tokens — [CLS] A [SEP] (B [SEP]).
    def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        lowerCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    # Intended: get_special_tokens_mask.
    def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )

        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
        return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]

    # Intended: create_token_type_ids_from_sequences — 0s for A, 1s for B.
    def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 513
| 0
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( a , start , end ):
    """Sort ``a[start:end + 1]`` in place with randomized quicksort.

    The mangled original declared three parameters with the same name
    (a SyntaxError) and called two helpers (``_in_place_partition``,
    ``_in_place_quick_sort``) that do not exist in this module; the
    partition step is inlined here so the function is self-contained.

    Args:
        a: mutable sequence to sort in place.
        start, end: inclusive bounds of the slice to sort.
    Returns:
        The number of element comparisons performed.
    """
    count = 0
    if start < end:
        # Lomuto partition around a uniformly random pivot, moved to the end.
        pivot = randint(a=start if False else start, b=end) if False else randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        store = start - 1
        for index in range(start, end):
            count += 1
            if a[index] < a[end]:
                store += 1
                a[store], a[index] = a[index], a[store]
        a[store + 1], a[end] = a[end], a[store + 1]
        p = store + 1
        # Recurse on both halves, accumulating comparison counts.
        count += SCREAMING_SNAKE_CASE__(a, start, p - 1)
        count += SCREAMING_SNAKE_CASE__(a, p + 1, end)
    return count
def SCREAMING_SNAKE_CASE__ ( a , start , end ):
    """Lomuto partition of ``a[start:end + 1]`` around a random pivot.

    The mangled original declared three parameters with one name (a
    SyntaxError) and bound every swap target to a throwaway local, so
    the array was never actually mutated; the swaps are restored here.

    Args:
        a: mutable sequence, partitioned in place.
        start, end: inclusive bounds of the slice.
    Returns:
        ``(pivot_position, count)`` where all elements left of
        ``pivot_position`` are strictly smaller than ``a[pivot_position]``
        and ``count`` is the number of comparisons (always ``end - start``).
    """
    count = 0
    # Choose a random pivot and park it at the end of the slice.
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    # Move the pivot into its final position.
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
# Demo driver: sample `p` values from a standard normal distribution, round-trip
# them through a temporary .npy file, sort in place and report comparison count.
# NOTE(review): every assignment target below was collapsed to the single name
# `__A`, so the names the later statements read (`mu`, `sigma`, `p`, `outfile`,
# `X`, `M`, `r`, `z`) and the sorter `_in_place_quick_sort` are all undefined
# in this module — executing this block raises NameError. Presumably each line
# originally bound a distinct name; restore before use.
__A = TemporaryFile()
__A = 1_0_0 # 1000 elements are to be sorted
# NOTE(review): the trailing comment above says 1000 but the value is 100 — confirm.
__A ,__A = 0, 1 # mean and standard deviation
__A = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
__A = np.load(outfile)
__A = len(M) - 1
__A = _in_place_quick_sort(M, 0, r)
# NOTE(review): the two adjacent string literals below concatenate without a
# separating space ("...distributionis :").
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 586
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration chunk.
__A = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): this rebinds `__A`, clobbering the logger bound just above —
# the two values presumably had distinct names originally; confirm before
# relying on either binding.
__A = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
    """Configuration for a TimeSformer-style video transformer.

    Stores the hyper-parameters below on the instance and forwards any
    extra keyword arguments to the base configuration class.

    NOTE(review): the base class reference ``_UpperCAmelCase`` and the
    class attribute name below were mangled; the attribute is presumably
    ``model_type`` — confirm against the original file.
    """

    _UpperCAmelCase :Tuple = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Store every hyper-parameter on the instance. The mangled original
        # declared all parameters under one name (a SyntaxError) and dropped
        # each value into a throwaway local, losing the configuration.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 586
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# BibTeX entries for the papers behind the TER metric (Snover et al., 2006)
# and sacrebleu reporting conventions (Post, 2018). Renamed from the mangled
# placeholder so `datasets.MetricInfo(citation=_CITATION)` below resolves.
_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
    author = \"Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John\",
    booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
    month = aug # \" 8-12\",
    year = \"2006\",
    address = \"Cambridge, Massachusetts, USA\",
    publisher = \"Association for Machine Translation in the Americas\",
    url = \"https://aclanthology.org/2006.amta-papers.25\",
    pages = \"223--231\",
}
@inproceedings{post-2018-call,
    title = \"A Call for Clarity in Reporting {BLEU} Scores\",
    author = \"Post, Matt\",
    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
    month = oct,
    year = \"2018\",
    address = \"Belgium, Brussels\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W18-6319\",
    pages = \"186--191\",
}
"""
# Human-readable metric description. Renamed from the mangled placeholder so
# the `add_start_docstrings(_DESCRIPTION, ...)` decorator below resolves.
_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
# Argument/return documentation plus doctest examples for the metric card.
# Renamed from the mangled placeholder so the decorator and `MetricInfo`
# below can resolve `_KWARGS_DESCRIPTION`.
_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.
Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length
Examples:
    Example 1:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\",
        ...                     \"What did the TER metric user say to the developer?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
        ...             [\"Your jokes are...\", \"...TERrible\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
    Example 2:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
    Example 3:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
    Example 4:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
    Example 5:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\",
        ...                     \"What did the TER metric user say to the developer?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
        ...             [\"Your jokes are...\", \"...TERrible\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu's TER scorer.

    NOTE(review): both methods were emitted under the name ``snake_case_``
    (presumably ``_info`` and ``_compute`` originally), so the second
    definition shadows the first in the class body; the names are kept to
    avoid changing the visible interface — confirm and restore upstream.
    """

    def snake_case_ ( self ):
        """Return the metric metadata; requires sacrebleu >= 1.4.12."""
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        # Feature schema: one prediction string and a sequence of reference
        # strings per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
                "https://github.com/jhclark/tercom",
            ] , )

    def snake_case_ ( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        """Compute TER over ``predictions`` against transposed ``references``.

        The mangled original declared all parameters under one name (a
        SyntaxError) while the body read the names restored here.
        Returns a dict with ``score``, ``num_edits`` and ``ref_length``.
        """
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        # Transpose [pred][ref] -> [ref][pred], the layout sacrebleu expects.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 370
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration chunk.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): this rebinds `SCREAMING_SNAKE_CASE_`, clobbering the logger
# bound just above — the two presumably had distinct names originally.
SCREAMING_SNAKE_CASE_ = {
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class snake_case_ ( a_ ):
    """Configuration for an MRA (Multi-Resolution Attention) model.

    Stores the hyper-parameters below on the instance and forwards the
    special-token ids plus extra kwargs to the base configuration class.

    NOTE(review): the base class reference ``a_`` was mangled — it is
    presumably ``PretrainedConfig`` (imported above); confirm.
    """

    __lowerCAmelCase = "mra"

    def __init__(
        self,
        vocab_size=5_0_2_6_5,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Forward the special-token ids to the base config. The mangled
        # original declared every parameter under one name (a SyntaxError)
        # and passed the same keyword three times here.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Store every hyper-parameter on the instance (the mangled version
        # dropped them into throwaway locals, losing the configuration).
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 370
| 1
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
# Module-level logger shared by the Flax/PyTorch conversion helpers below.
__A = logging.get_logger(__name__)
def __a ( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    """Load a PyTorch checkpoint and convert it to a Flax state dict.

    Fixes the mangled signature (three parameters shared one name — a
    SyntaxError) and the body locals, which were collapsed so that
    ``pt_path``/``pt_state_dict``/``flax_state_dict`` were unbound.

    NOTE(review): ``convert_pytorch_state_dict_to_flax`` and
    ``convert_pytorch_sharded_state_dict_to_flax`` were also renamed by the
    mangling pass (to ``__a``) and are currently undefined in this module —
    the original call sites are preserved unchanged.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def __a ( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix ):
    """Rename one PyTorch weight key to its Flax equivalent, reshaping the
    tensor where the layouts differ (conv kernels, linear weights).

    Fixes the mangled signature (all parameters shared one name — a
    SyntaxError) and the body locals: every candidate key was bound to a
    throwaway local while the guards read ``renamed_pt_tuple_key``/``name``,
    and the inner predicate read an undefined ``key``.

    Returns:
        ``(flax_key_tuple, tensor)``.
    """

    def is_key_or_prefix_key_in_dict(key ) -> bool:
        # True when the candidate key, with or without the model prefix,
        # already exists in the randomly-initialized Flax state dict.
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Any ) -> List[Any]:
    """Convert a flat PyTorch ``state_dict`` into a nested Flax param dict.

    NOTE(review): an automated rewrite collapsed every assignment target to
    ``UpperCAmelCase_`` and both parameters to one repeated name (the def
    line itself is a SyntaxError), so the names actually read below —
    presumably ``pt_state_dict``, ``flax_model``, ``model_prefix``,
    ``flax_model_params``, ``random_flax_state_dict``, ``flax_state_dict``,
    ``pt_tuple_key``, ``has_base_model_prefix``, ``flax_key``,
    ``flax_tensor``, ``require_base_model_prefix`` — are unbound. The
    original per-line intent survives in the pre-existing comments; restore
    distinct names before use.
    """
    UpperCAmelCase_= {k: v.numpy() for k, v in pt_state_dict.items()}
    UpperCAmelCase_= flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCAmelCase_= flax_model.params["""params"""]
    else:
        UpperCAmelCase_= flax_model.params
    UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCAmelCase_= flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(lowerCAmelCase_ )
    UpperCAmelCase_= {}
    # Detect head-into-base vs base-into-head loading from the key prefixes.
    UpperCAmelCase_= (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    UpperCAmelCase_= (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCAmelCase_= tuple(pt_key.split(""".""" ) )
        # remove base model prefix if necessary
        UpperCAmelCase_= pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCAmelCase_= pt_tuple_key[1:]
        # Correctly rename weight parameters
        UpperCAmelCase_, UpperCAmelCase_= rename_key_and_reshape_tensor(
            lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
        # add model prefix if necessary
        UpperCAmelCase_= (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCAmelCase_= (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
                continue
            # also add unexpected weight so that warning is thrown
            UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
    return unflatten_dict(lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : int ) -> Union[str, Any]:
    """Convert a *sharded* PyTorch checkpoint (list of .pt shard files) into
    a nested Flax param dict, accumulating across shards.

    NOTE(review): like the function above, every assignment target was
    collapsed to ``UpperCAmelCase_`` and the two parameters share one name
    (a SyntaxError); the names read below (``shard_filenames``,
    ``flax_model``, ``pt_state_dict``, ``model_prefix``, and friends) are
    unbound here. Restore distinct names before use.
    """
    import torch
    # Load the index
    UpperCAmelCase_= {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCAmelCase_= torch.load(lowerCAmelCase_ )
        UpperCAmelCase_= {k: v.numpy() for k, v in pt_state_dict.items()}
        UpperCAmelCase_= flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCAmelCase_= flax_model.params["""params"""]
            UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
            random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
        else:
            UpperCAmelCase_= flax_model.params
            UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
        UpperCAmelCase_= (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        UpperCAmelCase_= (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCAmelCase_= tuple(pt_key.split(""".""" ) )
            # remove base model prefix if necessary
            UpperCAmelCase_= pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCAmelCase_= pt_tuple_key[1:]
            # Correctly rename weight parameters
            UpperCAmelCase_, UpperCAmelCase_= rename_key_and_reshape_tensor(
                lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
            # add model prefix if necessary
            UpperCAmelCase_= (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCAmelCase_= (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
                # add batch stats if the model contains batchnorm layers
                if "batch_stats" in flax_model.params:
                    if "mean" in flax_key[-1]:
                        UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
                        continue
                    if "var" in flax_key[-1]:
                        UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
                        continue
                    # remove num_batches_tracked key
                    if "num_batches_tracked" in flax_key[-1]:
                        flax_state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
                        continue
                    # also add unexpected weight so that warning is thrown
                    UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
                else:
                    # also add unexpected weight so that warning is thrown
                    UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
    return unflatten_dict(lowerCAmelCase_ )
def __a ( model , flax_checkpoint_path ):
    """Deserialize a Flax checkpoint file and load it into a PyTorch model.

    Fixes the mangled body locals: the checkpoint path, the looked-up Flax
    class, and the deserialized state dict were all bound to a throwaway
    name while later statements read them.

    NOTE(review): the first argument of ``getattr`` was lost in the
    mangling; it is presumably the ``transformers`` module imported above —
    confirm. ``load_flax_weights_in_pytorch_model`` is called by name here
    exactly as in the original (it is currently undefined in this mangled
    module because the definition was renamed).
    """
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers , """Flax""" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , """rb""" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def __a ( lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Any ) -> Optional[int]:
    """Copy a Flax parameter tree into a PyTorch model's state dict, renaming
    keys, transposing conv/linear weights, and warning about missing or
    unexpected keys.

    NOTE(review): every assignment target below was collapsed to
    ``UpperCAmelCase_``, the two parameters share one name (a SyntaxError),
    and even the lambda parameter was mangled while its body reads ``x`` —
    the names actually read (``pt_model``, ``flax_state``,
    ``flax_state_dict``, ``pt_model_dict``, ``flax_key_tuple``,
    ``flax_tensor``, ``missing_keys``, ``unexpected_keys``, ...) are
    unbound. Restore distinct names before use; the pre-existing comments
    document the original per-step intent.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    UpperCAmelCase_= flatten_dict(jax.tree_util.tree_map(lambda lowerCAmelCase_ : x.dtype == jnp.bfloataa ,lowerCAmelCase_ ) ).values()
    if any(lowerCAmelCase_ ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        UpperCAmelCase_= jax.tree_util.tree_map(
            lambda lowerCAmelCase_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,lowerCAmelCase_ )
    UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
    UpperCAmelCase_= pt_model.state_dict()
    # Detect head-into-base vs base-into-head loading from the key prefixes.
    UpperCAmelCase_= (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    UpperCAmelCase_= (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    UpperCAmelCase_= []
    UpperCAmelCase_= set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCAmelCase_= flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCAmelCase_= """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCAmelCase_= flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCAmelCase_= (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase_ ) not in pt_model_dict:
            # conv layer
            UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
            UpperCAmelCase_= jnp.transpose(lowerCAmelCase_ ,(3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase_ ) not in pt_model_dict:
            # linear layer
            UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
            UpperCAmelCase_= flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCAmelCase_= flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            UpperCAmelCase_= flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            UpperCAmelCase_= """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            UpperCAmelCase_= """.""".join(lowerCAmelCase_ )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCAmelCase_= {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCAmelCase_= key.split(""".""" )
            UpperCAmelCase_= None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCAmelCase_= key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCAmelCase_= key_components[-2] + """_v"""
            if name is not None:
                UpperCAmelCase_= key_components[:-3] + [name]
                UpperCAmelCase_= """.""".join(lowerCAmelCase_ )
                UpperCAmelCase_= key
        if flax_key in special_pt_names:
            UpperCAmelCase_= special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                UpperCAmelCase_= np.asarray(lowerCAmelCase_ ) if not isinstance(lowerCAmelCase_ ,np.ndarray ) else flax_tensor
                UpperCAmelCase_= torch.from_numpy(lowerCAmelCase_ )
                # remove from missing keys
                missing_keys.remove(lowerCAmelCase_ )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(lowerCAmelCase_ )
    pt_model.load_state_dict(lowerCAmelCase_ )
    # re-transform missing_keys to list
    UpperCAmelCase_= list(lowerCAmelCase_ )
    if len(lowerCAmelCase_ ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    else:
        logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(lowerCAmelCase_ ) > 0:
        logger.warning(
            F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""" )
    else:
        logger.warning(
            F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            """If your task is similar to the task the model of the checkpoint was trained on, """
            F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
    return pt_model
| 593
|
from manim import *
class lowercase ( snake_case__):
    """Manim scene animating how model weights are offloaded to disk.

    NOTE(review): the base class reference ``snake_case__`` is presumably a
    manim ``Scene``; confirm. Every assignment target in the method below was
    collapsed to ``UpperCAmelCase_``, so the names later statements read
    (``mem``, ``meta_mem``, ``fill``, ``cpu_left_col_base``,
    ``cpu_right_col_base``, ``model_cpu_arr``, ``ckpt_arr``, ``cpu_target``,
    ``disk_left_col_base``, ``animations``, ...) are unbound — running this
    scene raises NameError. Restore distinct names before use.
    """

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        # Base building blocks: memory cells, meta cells, and a fill template.
        UpperCAmelCase_= Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase_= Rectangle(height=0.25 , width=0.25 )
        UpperCAmelCase_= Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six memory cells with a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""CPU""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: a row of four memory cells with a label.
        UpperCAmelCase_= [mem.copy() for i in range(4 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""GPU""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: a row of six memory cells with a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Model""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Place one small filled target per model cell next to the CPU cells.
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            UpperCAmelCase_= Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
        # Loaded checkpoint: another row of six cells with a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Loaded Checkpoint""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )
        # Mirror each checkpoint cell onto a CPU column position.
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase_= fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            UpperCAmelCase_= target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Legend key in the top-left corner.
        UpperCAmelCase_= Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase_= MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_= MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Step 1 caption, then animate weights moving into disk cells.
        UpperCAmelCase_= MarkupText(
            F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Disk""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
        UpperCAmelCase_= []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase_= rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Step 2 caption, then fade everything out.
        UpperCAmelCase_= MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
| 593
| 1
|
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase_(SCREAMING_SNAKE_CASE: Optional[int]):
    """Normalize a size spec to a pair.

    Returns the argument unchanged when it is already iterable (e.g. an
    (h, w) tuple), otherwise duplicates the scalar into a 2-tuple.
    Fix: the original body returned an undefined name ``x`` instead of
    the parameter.
    """
    if isinstance(SCREAMING_SNAKE_CASE, collections.abc.Iterable):
        return SCREAMING_SNAKE_CASE
    return (SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE)


# The rest of this module calls this helper as `to_atuple`; keep both names bound.
to_atuple = lowerCAmelCase_
@require_tf
class SCREAMING_SNAKE_CASE_:
    """Shared checks for TF vision-text dual encoder model tests.

    Subclasses provide `get_vision_text_model`, `prepare_config_and_inputs`
    and `get_pretrained_model_and_inputs` (the call sites below fix these
    names). Fix: method names were all mangled to `__lowercase` (duplicates)
    and tuple-unpack targets carried annotations (SyntaxError); restored
    the names the class itself calls.
    """

    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Build the composite model from the two sub-configs and check embedding shapes.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        # Round-trip through save_pretrained/from_pretrained and compare outputs.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Assert max absolute difference between two arrays is within *tol*."""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        # NOTE(review): restored a descriptive test_* name; confirm against upstream.
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class SCREAMING_SNAKE_CASE_(snake_case__, unittest.TestCase):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    Fix: duplicate `__lowercase` method names and annotated tuple-unpack
    targets (SyntaxError) replaced by the hook names the mixin calls.
    """

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        # ViT tester yields (config, pixel_values, labels); labels are unused here.
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class SCREAMING_SNAKE_CASE_(snake_case__, unittest.TestCase):
    """DeiT + RoBERTa instantiation of the dual-encoder test mixin.

    Overrides the attention-shape check because DeiT prepends two special
    tokens ([CLS] and distillation) instead of one. Fix: restored the
    mangled method names / broken tuple unpacking.
    """

    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class SCREAMING_SNAKE_CASE_(snake_case__, unittest.TestCase):
    """CLIP vision encoder + BERT instantiation of the dual-encoder tests.

    Fix: restored the mangled hook names and the broken tuple unpacking;
    the CLIP vision tester returns only (config, pixel_values).
    """

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Slow integration test against the clip-italian checkpoint.

    Fix: every local was assigned to `__lowerCamelCase` while later lines
    referenced `model`/`inputs`/`outputs` (NameErrors); restored the names
    the body actually uses and the boolean kwargs (`from_pt`, `padding`).
    """

    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 363
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_(snake_case__):
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning and forwards construction to the parent image
    processor. Fix: `*__lowercase, **__lowercase` duplicated the argument
    name (SyntaxError) and the warning category argument had been lost.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 363
| 1
|
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
A__ = len(UpperCAmelCase_ )
A__ = len(UpperCAmelCase_ )
A__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
A__ = True
for i in range(UpperCAmelCase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ = True
if a[i].islower():
A__ = True
return dp[n][m]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 104
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase (A__ ,unittest.TestCase ):
    """Unit tests for PhobertTokenizer using a tiny on-disk vocab/merges.

    Fix: every method had been renamed to the same identifier, so unittest
    never ran `setUp`/`test_*`; restored the hook names the tokenizer test
    mixin expects, and removed a leftover debug `print`.
    """

    # NOTE(review): attribute names follow the TokenizerTesterMixin contract —
    # confirm against the mixin.
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 196
| 0
|
from heapq import heappop, heappush
import numpy as np
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
a_ : Any = grid.shape
a_ : Dict = [-1, 1, 0, 0]
a_ : List[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ : int = [(0, source)], set()
a_ : Optional[Any] = np.full((rows, cols) , np.inf )
a_ : Tuple = 0
a_ : Optional[Any] = np.empty((rows, cols) , dtype=__UpperCamelCase )
a_ : Optional[Any] = None
while queue:
(a_) : Any = heappop(__UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ : Tuple = []
while (x, y) != source:
path.append((x, y) )
a_ : Any = predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__UpperCamelCase ) ):
a_ : Dict = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__UpperCamelCase , (dist + 1, (nx, ny)) )
a_ : List[str] = dist + 1
a_ : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 710
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _a ( __UpperCamelCase=None , __UpperCamelCase=None ):
return field(default_factory=lambda: default , metadata=__UpperCamelCase )
@dataclass
class a__:
    """CLI arguments selecting the benchmark csv and how to plot it.

    Fix: every attribute had been renamed to the same identifier (so the
    dataclass ended up with a single field) and defaults pointed at an
    undefined name; field names restored from how the Plot class reads
    them (`args.csv_file`, `args.is_time`, ...), defaults to False/None.
    """

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = field(
        default=None,
        metadata={"help": "List of model names that are used instead of the ones in the csv file."},
    )
def _a ( __UpperCamelCase ):
try:
int(__UpperCamelCase )
return True
except ValueError:
return False
def _a ( __UpperCamelCase ):
try:
float(__UpperCamelCase )
return True
except ValueError:
return False
class a__:
    """Reads benchmark results from a csv and plots them with matplotlib.

    Fix: annotated tuple-unpack targets (SyntaxError) and the lost
    result-dict key assignments in `__init__` restored; local names
    reconstructed from the surviving references in the body.
    """

    def __init__(self, args):
        self.args = args
        # model -> {"bsz": [...], "seq_len": [...], "result": {(bsz, seq_len): value}}
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def UpperCAmelCase(self):
        """Render one scatter/line series per model, log-scaled unless disabled."""
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                # Missing (bsz, seq_len) combinations shorten the y array; trim x to match.
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def _a():
    """Parse CLI arguments into the plot dataclass and render the plot.

    Fix: the body referenced undefined names `Plot` and `main`, and called
    a method named `plot` that this module defines as `UpperCAmelCase`;
    rewired to the names actually defined in this file (`a__`).
    """
    parser = HfArgumentParser(a__)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = a__(args=plot_args)
    plot.UpperCAmelCase()


if __name__ == "__main__":
    _a()
| 478
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this config file.
_UpperCAmelCase : Dict = logging.get_logger(__name__)

# Map from pretrained checkpoint name to its hosted config.json URL.
_UpperCAmelCase : Dict = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class __magic_name__(__SCREAMING_SNAKE_CASE):
    """Configuration class for CANINE (character-level transformer).

    Fix: every constructor parameter shared the name `snake_case_`
    (SyntaxError); parameter names restored from the attribute each
    positional default is assigned to in the body.
    """

    # NOTE(review): restored the conventional `model_type` attribute name
    # expected by the config base class — confirm against upstream.
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 72
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
# Module-level logger.
__UpperCamelCase : Any = logging.get_logger(__name__)

# Lookup table translating ONNX Runtime element-type strings (as reported on
# session inputs/outputs) into numpy dtypes.
# NOTE(review): several values below (np.inta, np.intaa, np.uintaa,
# np.floataa) are not valid numpy attribute names — the digits appear to have
# been garbled (int8/int16/int32/float16/...). Verify against the original
# mapping before relying on this table.
__UpperCamelCase : List[str] = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.inta,
    'tensor(uint8)': np.uinta,
    'tensor(int16)': np.intaa,
    'tensor(uint16)': np.uintaa,
    'tensor(int32)': np.intaa,
    'tensor(uint32)': np.uintaa,
    'tensor(int64)': np.intaa,
    'tensor(uint64)': np.uintaa,
    'tensor(float16)': np.floataa,
    'tensor(float)': np.floataa,
    'tensor(double)': np.floataa,
}
class lowercase__:
    """Thin wrapper exposing a save/load API around an onnxruntime InferenceSession.

    Fix: all methods had been renamed `__A` (duplicates) while the call sites
    (`self._save_pretrained`, `cls._from_pretrained`, `.load_model`) kept the
    real names; `__call__` referenced undefined locals; an annotated tuple
    target made `from_pretrained` a SyntaxError. Internal references to the
    (renamed-away) class name are routed through `cls` so the file stands on
    its own.
    """

    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        # NOTE(review): the fallback default for latest_model_name was lost in
        # the original; ONNX_WEIGHTS_NAME matches how _save_pretrained uses it.
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # Convert every input to a numpy array and run the session.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an onnxruntime InferenceSession for *path*."""
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the latest model file (and external weights, if any) into *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Public save entry point; refuses a file path, creates the directory."""
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        # Support "repo@revision" identifiers.
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 248
| 0
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_lowerCamelCase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __a ( nn.Module ):
    # NOTE(review): mechanically renamed from the MMBT example's ``ImageEncoder``.
    # As written this class cannot run: ``torchvision.models.resnetaaa`` and
    # ``nn.AdaptiveAvgPoolad`` are not real attributes (originally
    # ``resnet152`` / ``AdaptiveAvgPool2d``), ``POOLING_BREAKDOWN`` and ``args``
    # are undefined here (the module constant is now ``_lowerCamelCase`` and the
    # parameter was renamed ``lowercase__``), and the attribute assignments were
    # collapsed to the local ``_lowercase`` so ``self.model`` / ``self.pool``
    # are never set — confirm against the upstream transformers MMBT example.
    def __init__( self : int , lowercase__ : Any) ->Optional[Any]:
        """Build a truncated ResNet backbone plus an adaptive pooling layer."""
        super().__init__()
        _lowercase = torchvision.models.resnetaaa(pretrained=lowercase__)
        # drop the final avgpool + fc layers, keeping the conv feature extractor
        _lowercase = list(model.children())[:-2]
        _lowercase = nn.Sequential(*lowercase__)
        _lowercase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])
    def _UpperCAmelCase ( self : Dict , lowercase__ : List[Any]) ->Optional[int]:
        """Forward pass: pooled CNN features flattened and transposed to (B, N, 2048)."""
        _lowercase = self.pool(self.model(lowercase__))
        _lowercase = torch.flatten(lowercase__ , start_dim=2)
        _lowercase = out.transpose(1 , 2).contiguous()
        return out  # BxNx2048
class __a ( _snake_case ):
    # NOTE(review): mechanically renamed from the MMBT example's ``JsonlDataset``.
    # The ``__init__`` parameters all collapsed to the duplicate name
    # ``lowercase__`` (a SyntaxError) and every attribute assignment collapsed to
    # the local ``_lowercase``, so ``self.data`` / ``self.tokenizer`` /
    # ``self.labels`` / ``self.n_classes`` / ``self.max_seq_length`` /
    # ``self.transforms`` are never set, while the bodies still reference the
    # original names (``tokenizer``, ``labels``, ``index``, ``sentence``,
    # ``label_freqs``) — confirm against the upstream example before use.
    def __init__( self : Any , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Any , lowercase__ : str) ->Tuple:
        """Load a JSON-lines file of {text, img, label} records for MM-IMDB."""
        _lowercase = [json.loads(lowercase__) for l in open(lowercase__)]
        _lowercase = os.path.dirname(lowercase__)
        _lowercase = tokenizer
        _lowercase = labels
        _lowercase = len(lowercase__)
        _lowercase = max_seq_length
        _lowercase = transforms
    def __len__( self : int) ->List[Any]:
        """Number of JSONL records."""
        return len(self.data)
    def __getitem__( self : Any , lowercase__ : Optional[int]) ->List[Any]:
        """Return one example: tokenized sentence, transformed image, multi-hot label."""
        _lowercase = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowercase__))
        # split off the special start/end tokens, then truncate the middle
        _lowercase , _lowercase , _lowercase = sentence[0], sentence[1:-1], sentence[-1]
        _lowercase = sentence[: self.max_seq_length]
        _lowercase = torch.zeros(self.n_classes)
        _lowercase = 1
        _lowercase = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""])).convert("""RGB""")
        _lowercase = self.transforms(lowercase__)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def _UpperCAmelCase ( self : Union[str, Any]) ->List[str]:
        """Count how often each label occurs across the dataset."""
        _lowercase = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""])
        return label_freqs
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
_lowercase = [len(row["""sentence"""] ) for row in batch]
_lowercase , _lowercase = len(snake_case_ ), max(snake_case_ )
_lowercase = torch.zeros(snake_case_ , snake_case_ , dtype=torch.long )
_lowercase = torch.zeros(snake_case_ , snake_case_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(snake_case_ , snake_case_ ) ):
_lowercase = input_row["""sentence"""]
_lowercase = 1
_lowercase = torch.stack([row["""image"""] for row in batch] )
_lowercase = torch.stack([row["""label"""] for row in batch] )
_lowercase = torch.stack([row["""image_start_token"""] for row in batch] )
_lowercase = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _SCREAMING_SNAKE_CASE ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _SCREAMING_SNAKE_CASE ( ):
    """Return the evaluation image pipeline: resize, center-crop, normalize."""
    # ImageNet-style channel statistics used by the pretrained backbone.
    normalize = transforms.Normalize(
        mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , )
    steps = [
        transforms.Resize(256 ),
        transforms.CenterCrop(224 ),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps )
| 572
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __a ( unittest.TestCase ):
    # NOTE(review): mechanically renamed from ``BeitImageProcessingTester``.
    # The ``__init__`` parameters all collapsed to the duplicate name
    # ``lowercase__`` (a SyntaxError) and the attribute assignments collapsed to
    # the local ``_lowercase``, so the ``self.*`` values read by
    # ``prepare_image_processor_dict`` are never stored — confirm against the
    # upstream transformers test helper. Original parameters were presumably:
    # parent, batch_size, num_channels, image_size, min_resolution,
    # max_resolution, do_resize, size, do_center_crop, crop_size, do_normalize,
    # image_mean, image_std, do_reduce_labels — TODO confirm.
    def __init__( self : Dict , lowercase__ : Optional[int] , lowercase__ : Optional[Any]=7 , lowercase__ : Dict=3 , lowercase__ : Optional[int]=18 , lowercase__ : Any=30 , lowercase__ : Tuple=4_00 , lowercase__ : Dict=True , lowercase__ : List[str]=None , lowercase__ : Tuple=True , lowercase__ : Optional[int]=None , lowercase__ : Any=True , lowercase__ : Union[str, Any]=[0.5, 0.5, 0.5] , lowercase__ : Tuple=[0.5, 0.5, 0.5] , lowercase__ : Optional[Any]=False , ) ->str:
        """Hold default sizing/normalization settings for BeitImageProcessor tests."""
        _lowercase = size if size is not None else {"""height""": 20, """width""": 20}
        _lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = num_channels
        _lowercase = image_size
        _lowercase = min_resolution
        _lowercase = max_resolution
        _lowercase = do_resize
        _lowercase = size
        _lowercase = do_center_crop
        _lowercase = crop_size
        _lowercase = do_normalize
        _lowercase = image_mean
        _lowercase = image_std
        _lowercase = do_reduce_labels
    def _UpperCAmelCase ( self : Union[str, Any]) ->str:
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Fixes vs. the mangled original: locals had collapsed to ``_lowercase`` while
    the body referenced ``dataset``/``image``/``map``; the function name is
    restored to ``prepare_semantic_single_inputs``, which the test class below
    actually calls.
    """
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    seg_map = Image.open(dataset[1]["""file"""] )
    return image, seg_map
def prepare_semantic_batch_inputs ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures.

    Fixes vs. the mangled original: all four locals had collapsed to two names,
    so the function returned the first image and map twice; the function name is
    restored to ``prepare_semantic_batch_inputs``, which the test class below
    actually calls.
    """
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    # fixtures alternate image, map, image, map
    image_a = Image.open(ds[0]["""file"""] )
    map_a = Image.open(ds[1]["""file"""] )
    image_b = Image.open(ds[2]["""file"""] )
    map_b = Image.open(ds[3]["""file"""] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class __a ( _snake_case ,unittest.TestCase ):
    # NOTE(review): mechanically renamed from ``BeitImageProcessingTest``; every
    # method collapsed to ``_UpperCAmelCase`` so later defs shadow earlier ones,
    # ``setUp`` is never invoked by unittest, and references such as
    # ``BeitImageProcessingTester``, ``self.image_processor_tester``,
    # ``self.image_processing_class``, ``self.image_processor_dict`` and the
    # local name ``lowercase__`` no longer resolve — confirm against the
    # upstream transformers test file before relying on the behavior described.
    # Image-processor class under test (None when vision deps are unavailable).
    __SCREAMING_SNAKE_CASE : List[str] = BeitImageProcessor if is_vision_available() else None
    def _UpperCAmelCase ( self : Any) ->str:
        """setUp: build the helper that supplies image-processor kwargs."""
        _lowercase = BeitImageProcessingTester(self)
    @property
    def _UpperCAmelCase ( self : Union[str, Any]) ->List[Any]:
        """Kwargs dict used to instantiate the image processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _UpperCAmelCase ( self : Optional[int]) ->Any:
        """The processor exposes the expected configuration attributes."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowercase__ , """do_resize"""))
        self.assertTrue(hasattr(lowercase__ , """size"""))
        self.assertTrue(hasattr(lowercase__ , """do_center_crop"""))
        self.assertTrue(hasattr(lowercase__ , """center_crop"""))
        self.assertTrue(hasattr(lowercase__ , """do_normalize"""))
        self.assertTrue(hasattr(lowercase__ , """image_mean"""))
        self.assertTrue(hasattr(lowercase__ , """image_std"""))
    def _UpperCAmelCase ( self : Optional[Any]) ->str:
        """``from_dict`` honors both the stored config and explicit overrides."""
        _lowercase = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20})
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
        self.assertEqual(image_processor.do_reduce_labels , lowercase__)
        _lowercase = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowercase__)
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
        self.assertEqual(image_processor.do_reduce_labels , lowercase__)
    def _UpperCAmelCase ( self : Union[str, Any]) ->List[Any]:
        """Placeholder (batch-feature test intentionally skipped)."""
        pass
    def _UpperCAmelCase ( self : List[str]) ->int:
        """PIL inputs: single image and batch produce cropped pixel tensors."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__)
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image)
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def _UpperCAmelCase ( self : str) ->int:
        """NumPy inputs behave like PIL inputs."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        _lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__)
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , np.ndarray)
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def _UpperCAmelCase ( self : Dict) ->Union[str, Any]:
        """PyTorch tensor inputs behave like PIL inputs."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__)
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor)
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def _UpperCAmelCase ( self : Dict) ->Any:
        """Segmentation maps are padded/cropped alongside images, with long dtype."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__)
        _lowercase = []
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long)
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
        # Test batched
        _lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long)
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
        # Test not batched input (PIL images)
        _lowercase , _lowercase = prepare_semantic_single_inputs()
        _lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long)
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
        # Test batched input (PIL images)
        _lowercase , _lowercase = prepare_semantic_batch_inputs()
        _lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                2,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long)
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
    def _UpperCAmelCase ( self : Dict) ->Optional[Any]:
        """``reduce_labels`` remaps ADE20k labels (0..150) into the 0..255 range."""
        _lowercase = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        _lowercase , _lowercase = prepare_semantic_single_inputs()
        _lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 1_50)
        _lowercase = True
        _lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
        self.assertTrue(encoding["""labels"""].min().item() >= 0)
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
| 572
| 1
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset ( dataset , expected_features ):
    """Shared assertions: *dataset* is a 4-row, 3-column ``Dataset`` whose
    feature dtypes match *expected_features* (a column-name -> dtype mapping).

    Fixes vs. the mangled original: both parameters shared one name (a
    SyntaxError) while the body referenced ``dataset`` / ``expected_features``;
    the function name is restored to ``_check_parquet_dataset``, which the
    tests below actually call.
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Reading a Dataset from one parquet file works with and without keep_in_memory."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError);
    # the body still references the original fixtures ``parquet_path`` /
    # ``tmp_path`` and the ``keep_in_memory`` flag, and ``_check_parquet_dataset``
    # was renamed ``snake_case_`` — confirm against the upstream datasets test.
    _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """An explicit ``features`` schema is honored when reading a parquet Dataset."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original fixtures/params ``tmp_path``,
    # ``features``, ``parquet_path`` — confirm against the upstream datasets test.
    _SCREAMING_SNAKE_CASE : List[Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : int = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE : int = (
        Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """The requested split name is propagated to the resulting Dataset."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``tmp_path`` / ``split`` fixtures.
    _SCREAMING_SNAKE_CASE : List[Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """The reader accepts either a single path string or a list of paths."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``path_type`` / ``parquet_path`` /
    # ``tmp_path`` fixtures — confirm against the upstream datasets test.
    if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        _SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
    elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        _SCREAMING_SNAKE_CASE : str = [parquet_path]
    _SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _check_parquet_datasetdict ( dataset_dict , expected_features , splits=("""train""",) ):
    """Shared assertions: *dataset_dict* is a ``DatasetDict`` whose listed
    *splits* each contain the standard 4x3 fixture with *expected_features*
    dtypes.

    Fixes vs. the mangled original: all three parameters shared one name (a
    SyntaxError) while the body referenced the original names; the function
    name is restored to ``_check_parquet_datasetdict``, which the tests below
    actually call.
    """
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Reading a DatasetDict from parquet works with and without keep_in_memory."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``tmp_path`` / ``keep_in_memory``
    # fixtures and ``_check_parquet_datasetdict`` (renamed ``snake_case_``).
    _SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(
            {"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """An explicit ``features`` schema is honored when reading a parquet DatasetDict."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``tmp_path`` / ``features`` /
    # ``parquet_path`` fixtures — confirm against the upstream datasets test.
    _SCREAMING_SNAKE_CASE : List[Any] = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Tuple = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE : Union[str, Any] = (
        Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Split names in the path mapping are propagated to the DatasetDict."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``split`` / ``parquet_path`` /
    # ``tmp_path`` fixtures and the local ``path`` — confirm against upstream.
    if split:
        _SCREAMING_SNAKE_CASE : Optional[int] = {split: parquet_path}
    else:
        _SCREAMING_SNAKE_CASE : List[str] = """train"""
        _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path}
    _SCREAMING_SNAKE_CASE : str = tmp_path / """cache"""
    _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
    _check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """ParquetDatasetWriter round-trips a dataset to a parquet file unchanged."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``dataset`` / ``tmp_path`` fixtures and
    # the locals ``writer`` / ``pf`` / ``output_table`` — confirm against upstream.
    _SCREAMING_SNAKE_CASE : str = ParquetDatasetWriter(SCREAMING_SNAKE_CASE__ , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE : List[str] = pq.ParquetFile(tmp_path / """foo.parquet""" )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = pf.read()
    assert dataset.data.table == output_table
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Image features survive a parquet write/read round trip (eager and streaming)."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the original ``shared_datadir`` / ``tmp_path``
    # fixtures and locals (``writer``, ``reloaded_dataset``,
    # ``reloaded_iterable_dataset``) — confirm against the upstream test.
    _SCREAMING_SNAKE_CASE : str = str(shared_datadir / """test_image_rgb.jpg""" )
    _SCREAMING_SNAKE_CASE : Tuple = {"""image""": [image_path]}
    _SCREAMING_SNAKE_CASE : Tuple = Features({"""image""": Image()} )
    _SCREAMING_SNAKE_CASE : Any = Dataset.from_dict(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ )
    _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetWriter(SCREAMING_SNAKE_CASE__ , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE : Tuple = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE__ ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    """feature, expected""" , [
        (Features({"""foo""": Value("""int32""" )} ), None),
        (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """get_writer_batch_size picks a row-group size based on the feature types."""
    # NOTE(review): parameters were mangled into duplicates (a SyntaxError); the
    # body still references the parametrized ``expected`` value.
    assert get_writer_batch_size(SCREAMING_SNAKE_CASE__ ) == expected
| 533
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ : Optional[Any] = 16
UpperCAmelCase_ : List[str] = 32
def get_dataloaders ( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
    """Build GLUE/MRPC train and eval DataLoaders for *model_name_or_path*.

    Fixes vs. the mangled original: all three parameters shared one name (a
    SyntaxError), the locals had collapsed so ``datasets`` /
    ``tokenized_datasets`` / ``accelerator`` were undefined, and the function
    name is restored to ``get_dataloaders``, which ``training_function`` calls.
    Reconstructed from the upstream accelerate checkpointing test script.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop ( accelerator , model , eval_dataloader , metric ):
    """Run one evaluation pass and return the ``accuracy`` value from *metric*.

    De-duplicates the final distributed batch (accelerate pads/repeats the last
    batch across processes) before feeding predictions into *metric*.

    Fixes vs. the mangled original: all four parameters shared one name (a
    SyntaxError) and locals had collapsed so ``samples_seen`` /
    ``predictions`` / ``references`` / ``eval_metric`` were undefined; the
    function name is restored to ``evaluation_loop``, which
    ``training_function`` calls.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                # trim the duplicated tail of the final gathered batch
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Train GLUE/MRPC with accelerate + optional DeepSpeed, checkpointing per epoch.

    NOTE(review): mechanically mangled from the accelerate checkpointing test
    script's ``training_function(config, args)``. The two parameters share one
    duplicate name (a SyntaxError), locals collapsed to
    ``_SCREAMING_SNAKE_CASE`` so later references (``num_epochs``, ``metric``,
    ``starting_epoch``, ``lr_scheduler``, ``accuracy``, ``state``,
    ``overall_step``, ...) are undefined, and the helpers it calls
    (``get_dataloaders``, ``evaluation_loop``) were also renamed — confirm
    every claim below against the upstream script.
    """
    _SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _SCREAMING_SNAKE_CASE : Optional[int] = config["""lr"""]
    _SCREAMING_SNAKE_CASE : Any = int(config["""num_epochs"""] )
    _SCREAMING_SNAKE_CASE : Optional[Any] = int(config["""seed"""] )
    _SCREAMING_SNAKE_CASE : Tuple = int(config["""batch_size"""] )
    _SCREAMING_SNAKE_CASE : List[str] = args.model_name_or_path
    set_seed(SCREAMING_SNAKE_CASE__ )
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
    # Instantiate optimizer: real AdamW unless DeepSpeed supplies its own.
    _SCREAMING_SNAKE_CASE : List[str] = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    _SCREAMING_SNAKE_CASE : Any = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
    if accelerator.state.deepspeed_plugin is not None:
        _SCREAMING_SNAKE_CASE : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        _SCREAMING_SNAKE_CASE : Union[str, Any] = 1
    _SCREAMING_SNAKE_CASE : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler: real warmup schedule unless DeepSpeed supplies one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        _SCREAMING_SNAKE_CASE : List[Any] = get_linear_schedule_with_warmup(
            optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE__ , )
    else:
        _SCREAMING_SNAKE_CASE : Any = DummyScheduler(SCREAMING_SNAKE_CASE__ , total_num_steps=SCREAMING_SNAKE_CASE__ , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # We need to keep track of how many total steps we have iterated over
    _SCREAMING_SNAKE_CASE : Optional[Any] = 0
    # We also need to keep track of the stating epoch so files are named properly
    _SCREAMING_SNAKE_CASE : str = 0
    _SCREAMING_SNAKE_CASE : Tuple = evaluate.load("""glue""" , """mrpc""" )
    _SCREAMING_SNAKE_CASE : int = num_epochs
    if args.partial_train_epoch is not None:
        _SCREAMING_SNAKE_CASE : Dict = args.partial_train_epoch
    if args.resume_from_checkpoint:
        # Resume path: restore state, re-derive the epoch number from the
        # checkpoint folder name, then sanity-check against the saved metrics.
        accelerator.load_state(args.resume_from_checkpoint )
        _SCREAMING_SNAKE_CASE : Any = args.resume_from_checkpoint.split("""epoch_""" )[1]
        _SCREAMING_SNAKE_CASE : Union[str, Any] = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        _SCREAMING_SNAKE_CASE : int = int(SCREAMING_SNAKE_CASE__ ) + 1
        _SCREAMING_SNAKE_CASE : List[str] = evaluation_loop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        accelerator.print("""resumed checkpoint performance:""" , SCREAMING_SNAKE_CASE__ )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
            _SCREAMING_SNAKE_CASE : Any = json.load(SCREAMING_SNAKE_CASE__ )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    _SCREAMING_SNAKE_CASE : int = {}
    for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        model.train()
        for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
            _SCREAMING_SNAKE_CASE : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
            _SCREAMING_SNAKE_CASE : Optional[Any] = outputs.loss
            _SCREAMING_SNAKE_CASE : int = loss / gradient_accumulation_steps
            accelerator.backward(SCREAMING_SNAKE_CASE__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        # Per-epoch checkpoint + metric dump, named after the epoch number.
        _SCREAMING_SNAKE_CASE : int = f"""epoch_{epoch}"""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
        accelerator.save_state(SCREAMING_SNAKE_CASE__ )
        _SCREAMING_SNAKE_CASE : Optional[Any] = evaluation_loop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        _SCREAMING_SNAKE_CASE : Dict = accuracy
        _SCREAMING_SNAKE_CASE : Any = lr_scheduler.get_lr()[0]
        _SCREAMING_SNAKE_CASE : Any = optimizer.param_groups[0]["""lr"""]
        _SCREAMING_SNAKE_CASE : Dict = epoch
        _SCREAMING_SNAKE_CASE : Union[str, Any] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE__ )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def main():
    """Parse CLI arguments and launch training.

    NOTE(review): the original defined this as ``snake_case_`` while the
    ``__main__`` guard calls ``main()``, and bound the parser/args to a
    throwaway local — both restored so the script can actually run.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,  # presumably False in the original — TODO confirm
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 533
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for the distance helpers below.
# NOTE(review): the original bound BOTH aliases to the same name ``lowercase_``
# (the second shadowed the first) and referenced the nonexistent ``np.floataa``
# — restored to the conventional names and ``np.float64``.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1, vector_2):
    """Return the Euclidean (L2) distance between two equal-length vectors, via NumPy.

    NOTE(review): the original named both distance functions ``lowerCAmelCase``
    (duplicate parameter names — a SyntaxError — and the second def shadowed the
    first); the benchmark strings below call ``euclidean_distance``, which fixes
    the intended name.

    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1, vector_2):
    """Pure-Python Euclidean (L2) distance between two equal-length vectors.

    Name restored from the benchmark string below (``euclidean_distance_no_np``);
    the original duplicate parameter names were a SyntaxError.

    >>> euclidean_distance_no_np([0, 0], [3, 4])
    5.0
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time both implementations on a small fixed input.

        NOTE(review): the original def was named ``lowerCAmelCase`` while the
        call below is ``benchmark()`` — a guaranteed NameError; name restored.
        """
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
| 352
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Builds constructor kwargs and metadata for MobileNetVa image-processor tests.

    NOTE(review): the original was named ``__A`` while the test class below
    instantiates ``MobileNetVaImageProcessingTester(self)`` — name restored from
    that call site. The original ``__init__`` also used the duplicate parameter
    name ``A`` for every argument (a SyntaxError) and assigned each value to a
    throwaway local instead of ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the processor defaults exercised by the tests below.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``MobileNetVaImageProcessor`` over PIL, NumPy and PyTorch inputs.

    NOTE(review): the original declared ``class __A(A, unittest.TestCase)`` with
    the undefined base ``A`` (import-time NameError) and every method named
    ``a__`` — so later defs shadowed earlier ones and the ``setUp`` /
    ``image_processor_dict`` property that the bodies rely on never existed.
    Names restored from the internal call sites and the canonical
    image-processing test layout.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # presumably an intentionally-empty placeholder — TODO confirm original name
        pass

    def _assert_encoded_shapes(self, image_processing, image_inputs):
        """Shared assertion: un-batched and batched outputs have the expected shape."""
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._assert_encoded_shapes(image_processing, image_inputs)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._assert_encoded_shapes(image_processing, image_inputs)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._assert_encoded_shapes(image_processing, image_inputs)
| 352
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    """Summarization task template mapping dataset columns to text/summary.

    NOTE(review): the original used the undefined name ``lowercase`` both as
    the ``frozen=`` argument and as the base class (import-time NameError), and
    collapsed every field name to ``__lowercase`` — field names restored from
    the ``column_mapping`` body and ``datasets``' TaskTemplate convention.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map this dataset's column names onto the task's canonical names."""
        return {self.text_column: "text", self.summary_column: "summary"}
| 147
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the NLLB-MoE model.
# NOTE(review): the original bound this dict (and the torch-only symbol list)
# to a throwaway name while passing ``_import_structure`` to ``_LazyModule``
# (NameError), imported ``NllbMoeTopaRouter`` instead of the exported
# ``NllbMoeTop2Router``, and never installed the lazy module in
# ``sys.modules`` — all restored per the standard transformers init pattern.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only importable when torch is installed.
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147
| 1
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside Google Colab (menu falls back to plain input()).
# NOTE(review): the original bound this to ``a_`` while the menu class below
# reads ``in_colab`` — name restored from those call sites.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class snake_case:
    """Interactive bullet-list terminal menu driven by arrow/number keys.

    NOTE(review): in the original every method was named ``a_`` (later defs
    shadowed earlier ones) while the bodies call ``write_choice`` /
    ``print_choice`` / ``move_direction``, parameters were duplicated
    (``a__, a__`` — a SyntaxError) and several arguments were replaced by the
    undefined ``__snake_case`` — names restored from the internal call sites.
    """

    def __init__(self, prompt: str = None, choices: list = None):
        self.position = 0
        # Avoid the original shared mutable-default list.
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        # Colored output only off Windows (console lacks ANSI color here).
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Render one row, with the arrow marker on the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the highlight up/down by ``num_spaces``, re-rendering both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        # Repaint the previously-highlighted row — presumably ``old_position``
        # in the original mangled call; TODO confirm.
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu and block until the user picks a choice index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Clear the rendered menu before returning.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 705
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the logger and the rename_keys list were bound to the
# throwaway name ``a_`` in the original, leaving ``logger`` / ``rename_keys``
# (used throughout the rest of this file) undefined — restored the names the
# call sites require. Everything else is unchanged.
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old_key, new_key):
    """Move ``state_dict[old_key]`` to ``state_dict[new_key]`` in place.

    NOTE(review): the original was named ``a__`` (while called as
    ``rename_key`` below), used duplicate parameter names (a SyntaxError), and
    dropped the popped value into a throwaway local instead of re-keying it.
    """
    val = state_dict.pop(old_key)
    state_dict[new_key] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with timm-style backbone keys renamed.

    ``backbone.0.body`` prefixes become ``backbone.conv_encoder.model``; all
    other keys are copied through unchanged. (Original was named ``a__`` and
    never actually populated the dict it returned.)
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused encoder in_proj weights/biases into separate q/k/v entries, in place.

    NOTE(review): the original (named ``a__``, with duplicate parameters)
    assigned each q/k/v slice to a throwaway local instead of writing it back
    into ``state_dict`` — targets reconstructed per the standard DETR
    conversion pattern (hidden size 256 per projection).
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO val2017 test image (two cats).

    Name restored from the call site in the conversion function below; the
    original bound both the URL and the image to throwaway locals.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
# NOTE(review): this function is invoked at the bottom of the file as
# ``convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path)``
# but is defined here as ``a__`` with two parameters both named ``__lowercase``
# (a SyntaxError). Every ``_A = ...`` line below was clearly once a distinct
# attribute/subscript assignment (e.g. ``config.backbone``, ``state_dict[key]``)
# that a mechanical rewrite collapsed into one throwaway local — the body reads
# ``model_name`` / ``is_panoptic`` / ``state_dict`` / ``model`` etc. that are
# never bound. This cannot run as written; restore from the original
# convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py script.
def a__ ( __lowercase , __lowercase ) -> Any:
    # Build a default config, then specialize it from the model name.
    _A = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        _A = "resnet101"
    if "dc5" in model_name:
        _A = True
    _A = "panoptic" in model_name
    if is_panoptic:
        _A = 250
    else:
        # Detection variant: 91 COCO classes, labels fetched from the hub.
        _A = 91
        _A = "huggingface/label-files"
        _A = "coco-detection-id2label.json"
        _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
        _A = {int(__lowercase ): v for k, v in idalabel.items()}
        _A = idalabel
        _A = {v: k for k, v in idalabel.items()}
    # load image processor
    _A = "coco_panoptic" if is_panoptic else "coco_detection"
    _A = ConditionalDetrImageProcessor(format=__lowercase )
    # prepare image
    _A = prepare_img()
    _A = image_processor(images=__lowercase , return_tensors="pt" )
    _A = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    _A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
    _A = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            _A = "conditional_detr." + src
        rename_key(__lowercase , __lowercase , __lowercase )
    _A = rename_backbone_keys(__lowercase )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _A = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                _A = state_dict.pop(__lowercase )
                _A = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                _A = state_dict.pop(__lowercase )
                _A = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                # Segmentation-head keys keep their names unchanged.
                continue
            else:
                _A = state_dict.pop(__lowercase )
                _A = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                _A = state_dict.pop(__lowercase )
                _A = val
    # finally, create HuggingFace model and load state dict
    _A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
    model.load_state_dict(__lowercase )
    model.eval()
    model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    _A = conditional_detr(__lowercase )
    _A = model(__lowercase )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    model.save_pretrained(__lowercase )
    image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to the
    # throwaway name ``a_`` while the following lines read ``parser`` /
    # ``args`` — a guaranteed NameError; names restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 621
| 0
|
def manhattan_distance(point_a, point_b) -> float:
    """Return the Manhattan (L1) distance between two same-dimension points.

    NOTE(review): the original gave all three functions in this file the same
    name ``_lowerCAmelCase`` and used duplicate parameter names (a
    SyntaxError), while calling the validator as ``_validate_point`` — names
    restored to match those call sites.

    Raises:
        ValueError: if the points have different dimensions (or are empty).
        TypeError: if a point is not a list of numbers.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point) -> None:
    """Raise unless *point* is a non-empty list of int/float values.

    Name restored from the call sites in the two distance functions; the
    original's duplicate local names made the error messages reference the
    wrong object.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a, point_b) -> float:
    """One-expression variant of :func:`manhattan_distance` (same contract).

    Raises ``ValueError`` on dimension mismatch / empty input and ``TypeError``
    on non-numeric entries, via ``_validate_point``.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 252
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
# NOTE(review): both assignments below bind the SAME name ``A__``, so the
# pretrained-config map shadows the logger — presumably mangled from
# ``logger`` and ``NAT_PRETRAINED_CONFIG_ARCHIVE_MAP``; verify against the
# original module before relying on either name.
A__ = logging.get_logger(__name__)

A__ = {
    '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class a(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class for the Nat (Neighborhood Attention Transformer) model.

    NOTE(review): the original declared ``class a(__lowerCamelCase, __lowerCamelCase)``
    — a duplicate, undefined base (import-time error) — and assigned every
    ``__init__`` argument to a throwaway local instead of ``self``. Bases were
    restored from the imports at the top of this module and attribute names
    from the values each assignment consumed.
    """

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 252
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowercase:
    """CLIP text tokenizer plus differentiable image preprocessing for VQGAN-CLIP.

    NOTE(review): the original's methods assigned every intermediate to a
    name-mangled throwaway (``__a``) and then returned the never-bound
    ``images`` / ``encoding`` — the resize → center-crop → normalize chain and
    the encoding dict construction have been restored; ``preprocess_img`` is
    named from its call site in ``__call__``.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published normalization statistics.
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize, center-crop and normalize *images* to CLIP's 224x224 input."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize *text*, preprocess *images*, and move everything to ``self.device``."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class __lowercase ( nn.Module ):
    """VQGAN + CLIP text-guided image generation/editing module.

    NOTE(review): this block appears machine-degraded — every local is bound to
    the placeholder ``__a`` and every parameter to ``UpperCamelCase`` (the
    ``__init__`` signature even repeats that name, which is a SyntaxError), so
    many later references (``device``, ``images``, ``paths``, ``encoding`` …)
    are undefined as written.  The comments below describe the apparent intent;
    confirm against the original source before relying on them.
    """
    def __init__( self , UpperCamelCase=10 , UpperCamelCase=0.01 , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase="image" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , ) -> None:
        super().__init__()
        __a = None
        # Resolve the compute device (presumably falls back to utils.get_device()).
        __a = device if device else get_device()
        # Reuse a supplied VQGAN or load one from config/checkpoint paths.
        if vqgan:
            __a = vqgan
        else:
            __a = load_vqgan(self.device , conf_path=UpperCamelCase , ckpt_path=UpperCamelCase )
        self.vqgan.eval()
        # Reuse a supplied CLIP model or download the base patch32 checkpoint.
        if clip:
            __a = clip
        else:
            __a = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        # Gradient-preserving CLIP pre-processor (tokenizer + torchvision transforms).
        __a = ProcessorGradientFlow(device=self.device )
        __a = iterations
        __a = lr
        __a = log
        __a = make_grid
        __a = return_val
        __a = quantize
        # Latent shape is taken from the VQGAN decoder.
        __a = self.vqgan.decoder.z_shape
    def UpperCamelCase__ ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=5 , UpperCamelCase=True ) -> List[str]:
        # Assemble the intermediate PNG frames found in the save path into a GIF.
        __a = []
        if output_path is None:
            __a = './animation.gif'
        if input_path is None:
            __a = self.save_path
        __a = sorted(glob(input_path + '/*' ) )
        if not len(UpperCamelCase ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(UpperCamelCase ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        # Spread the total duration evenly over the collected frames.
        __a = total_duration / len(UpperCamelCase )
        __a = [frame_duration] * len(UpperCamelCase )
        # Optionally hold the first/last frame longer for a nicer loop.
        if extend_frames:
            __a = 1.5
            __a = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(UpperCamelCase ) )
        imageio.mimsave(UpperCamelCase , UpperCamelCase , duration=UpperCamelCase )
        print(f"gif saved to {output_path}" )
    def UpperCamelCase__ ( self , UpperCamelCase=None , UpperCamelCase=None ) -> Any:
        # Encode an image (from disk; tensor input not implemented) into a VQGAN latent.
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        __a = preprocess(Image.open(UpperCamelCase ) , target_image_size=256 ).to(self.device )
        __a = preprocess_vqgan(UpperCamelCase )
        # vqgan.encode returns (z, *extras); only the latent is kept.
        __a , *__a = self.vqgan.encode(UpperCamelCase )
        return z
    def UpperCamelCase__ ( self , UpperCamelCase ) -> List[Any]:
        # Apply a learnable offset to the stored latent and decode it.
        __a = self.latent.detach().requires_grad_()
        __a = base_latent + transform_vector
        if self.quantize:
            __a , *__a = self.vqgan.quantize(UpperCamelCase )
        else:
            __a = trans_latent
        return self.vqgan.decode(UpperCamelCase )
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> List[str]:
        # CLIP similarity between prompt texts and an image, optionally weighted.
        __a = self.clip_preprocessor(text=UpperCamelCase , images=UpperCamelCase , return_tensors='pt' , padding=UpperCamelCase )
        __a = self.clip(**UpperCamelCase )
        __a = clip_outputs.logits_per_image
        if weights is not None:
            __a = similarity_logits * weights
        return similarity_logits.sum()
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
        # Loss: pull toward positive prompts, push away from negative ones.
        __a = self._get_clip_similarity(pos_prompts['prompts'] , UpperCamelCase , weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            __a = self._get_clip_similarity(neg_prompts['prompts'] , UpperCamelCase , weights=neg_prompts['weights'] )
        else:
            # Neutral term when no negative prompts are given.
            __a = torch.tensor([1] , device=self.device )
        __a = -torch.log(UpperCamelCase ) + torch.log(UpperCamelCase )
        return loss
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
        # Gradient-descend a random latent offset to minimize the CLIP loss;
        # yields either decoded PIL images or the raw vector each iteration.
        __a = torch.randn_like(self.latent , requires_grad=UpperCamelCase , device=self.device )
        __a = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            __a = self._add_vector(UpperCamelCase )
            __a = loop_post_process(UpperCamelCase )
            __a = self._get_CLIP_loss(UpperCamelCase , UpperCamelCase , UpperCamelCase )
            print('CLIP loss' , UpperCamelCase )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=UpperCamelCase )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
        # Initialize a Weights & Biases run and record the prompt configuration.
        wandb.init(reinit=UpperCamelCase , project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            __a = Image.open(UpperCamelCase )
            __a = image.resize((256, 256) )
            wandb.log('Original Image' , wandb.Image(UpperCamelCase ) )
    def UpperCamelCase__ ( self , UpperCamelCase ) -> List[Any]:
        # Normalize prompts into {'prompts': [...], 'weights': tensor}.
        # Accepts 'a|b', [('text', w), ...] or 'text:weight' forms.
        if not prompts:
            return []
        __a = []
        __a = []
        if isinstance(UpperCamelCase , UpperCamelCase ):
            __a = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(UpperCamelCase , (tuple, list) ):
                __a = prompt[0]
                __a = float(prompt[1] )
            elif ":" in prompt:
                __a , __a = prompt.split(':' )
                __a = float(UpperCamelCase )
            else:
                __a = prompt
                __a = 1.0
            processed_prompts.append(UpperCamelCase )
            weights.append(UpperCamelCase )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(UpperCamelCase , device=self.device ),
        }
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=None , ) -> Any:
        # Main entry point: seed a latent (from an image or at random), then
        # run the CLIP-guided optimization loop, optionally saving/showing frames.
        if image_path:
            __a = self._get_latent(UpperCamelCase )
        else:
            __a = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        assert pos_prompts, "You must provide at least one positive prompt."
        __a = self.process_prompts(UpperCamelCase )
        __a = self.process_prompts(UpperCamelCase )
        # Derive a save directory from the positive prompts if none was given.
        if save_final and save_path is None:
            __a = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(UpperCamelCase ):
                os.makedirs(UpperCamelCase )
            else:
                __a = save_path + '_' + get_timestamp()
                os.makedirs(UpperCamelCase )
            __a = save_path
        __a = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(UpperCamelCase ) )
        __a = loop_post_process(UpperCamelCase )
        for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase , UpperCamelCase , UpperCamelCase ) ):
            if show_intermediate:
                show_pil(UpperCamelCase )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(UpperCamelCase )} )
        if show_final:
            show_pil(UpperCamelCase )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
| 703
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def SCREAMING_SNAKE_CASE ( a_ : Tuple ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
def is_chinese(word):
    """Return 1 if every character of *word* is a CJK ideograph, else 0.

    Bug fix: the degraded version called ``ord`` on the whole word (a
    TypeError for multi-char strings) and passed the word, not the code
    point, to the helper.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


SCREAMING_SNAKE_CASE = is_chinese  # keep the degraded module-level name bound
def get_chinese_word(tokens):
    """Collect the multi-character, fully-Chinese tokens from *tokens*.

    Bug fix: the degraded version tested/added the whole token list instead
    of each token and returned an undefined name.
    """
    word_set = set()
    for token in tokens:
        # Only multi-character tokens made entirely of CJK chars count as words.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


SCREAMING_SNAKE_CASE = get_chinese_word  # keep the degraded module-level name bound
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix '##' onto BERT sub-tokens that continue a whole Chinese word.

    Bug fix: the degraded signature repeated the parameter name ``a_`` (a
    SyntaxError) and the body referenced undefined locals.
    Mutates and returns *bert_tokens*.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest possible word first, shrinking down to 2 chars.
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark the continuation pieces of the matched word.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


SCREAMING_SNAKE_CASE = add_sub_symbol  # keep the degraded module-level name bound
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """For each input line, return the positions of '##' sub-tokens that sit
    inside a whole Chinese word (the whole-word-masking reference ids).

    Bug fix: the degraded signature repeated ``a_`` three times (SyntaxError)
    and every call used placeholder arguments.
    """
    # LTP word segmentation, batched by 100 lines to bound memory.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    # BERT tokenization of the same lines.
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


SCREAMING_SNAKE_CASE = prepare_ref  # keep the degraded module-level name bound
def main(args):
    """Read the corpus, compute whole-word-masking refs and write them as JSON lines.

    Bug fix: the degraded version ignored its parameter (body read a global
    ``args``) and called ``prepare_ref`` with placeholder arguments.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)


SCREAMING_SNAKE_CASE = main  # keep the degraded module-level name bound
if __name__ == "__main__":
    # Bug fix: the parser was bound to a placeholder name, so the
    # `parser.add_argument(...)` calls and `main(args)` referenced
    # undefined names.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
| 490
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class _snake_case :
    """Simple FIFO queue over a list with head/tail cursors.

    Bug fix: every method of the degraded class shared one name, so only the
    last survived; the real names (``push``/``pop``/``is_empty``…) used by the
    tree's ``__str__`` are restored.
    """

    def __init__(self):
        self.data: list[Any] = []   # backing store; never shrinks
        self.head: int = 0          # index of the next element to pop
        self.tail: int = 0          # index one past the last pushed element

    def is_empty(self):
        """True when no un-popped elements remain."""
        return self.head == self.tail

    def push(self, data):
        """Append *data* at the tail."""
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        """Remove and return the element at the head."""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        """Number of elements currently in the queue."""
        return self.tail - self.head

    def print_queue(self):
        """Debug dump of the backing list and the live window."""
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])


MyQueue = _snake_case  # name used by AVLtree.__str__
class _snake_case :
    """AVL tree node: payload, two children and a cached subtree height.

    Bug fix: all accessors of the degraded class shared one name; the real
    ``get_*``/``set_*`` names used by the module-level helpers are restored.
    """

    def __init__(self, data):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1  # a leaf has height 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height


MyNode = _snake_case  # name used by insert_node and the type annotations
def __lowerCamelCase ( UpperCAmelCase_ : MyNode | None ):
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def my_max(a: int, b: int) -> int:
    """Return the larger of *a* and *b*."""
    if a > b:
        return a
    return b


__lowerCamelCase = my_max  # keep the degraded module-level name bound
def left_rotation(node: MyNode) -> MyNode:
    """Rotate *node* around its left child and return the new subtree root.

    Bug fix: the degraded version dropped the computed heights and the child
    re-link into placeholder arguments.
    """
    print('left rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    # Recompute cached heights bottom-up: first the demoted node, then the new root.
    node_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(node_height)
    ret_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(ret_height)
    return ret


__lowerCamelCase = left_rotation  # keep the degraded module-level name bound
def right_rotation(node: MyNode) -> MyNode:
    """Rotate *node* around its right child and return the new subtree root.

    Mirror image of ``left_rotation``; same placeholder-argument fixes.
    """
    print('right rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    # Recompute cached heights bottom-up.
    node_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(node_height)
    ret_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(ret_height)
    return ret


__lowerCamelCase = right_rotation  # keep the degraded module-level name bound
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: rotate the left child, then *node* itself."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


__lowerCamelCase = lr_rotation  # keep the degraded module-level name bound
def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: rotate the right child, then *node* itself."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


__lowerCamelCase = rl_rotation  # keep the degraded module-level name bound
def insert_node(node: MyNode | None, data: Any) -> MyNode:
    """Insert *data* into the subtree rooted at *node*, rebalancing on the way up.

    Bug fix: the degraded version passed placeholders to every recursive call
    and rotation.
    """
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    # Refresh the cached height of the (possibly new) subtree root.
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    return node


__lowerCamelCase = insert_node  # keep the degraded module-level name bound
def get_right_most(root: MyNode) -> Any:
    """Return the maximum value in the subtree (follow right children)."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


__lowerCamelCase = get_right_most  # keep the degraded module-level name bound
def get_left_most(root: MyNode) -> Any:
    """Return the minimum value in the subtree (follow left children)."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


__lowerCamelCase = get_left_most  # keep the degraded module-level name bound
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete *data* from the subtree rooted at *root*; return the new root.

    Bug fix: the degraded version lost the ``root = …`` rebinding targets and
    passed placeholders to the recursive/rotation calls.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace with the in-order successor, then delete it.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance if the deletion unbalanced this node.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    new_height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(new_height)
    return root


__lowerCamelCase = del_node  # keep the degraded module-level name bound
class _snake_case :
    """AVL tree facade over the module-level node helpers.

    Bug fix: the degraded methods all shared one name and called the helpers
    with placeholder arguments; the real names used by the demo code
    (``insert``/``del_node``/``get_height``) are restored.
    """

    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        # Delegates to the module-level get_height helper.
        return get_height(self.root)

    def insert(self, data):
        print('insert:' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                # Placeholder for a missing child; keep the level shape by
                # enqueuing two dummies.
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    # Finished a level: drop down one layer or terminate.
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


AVLtree = _snake_case  # name used by the module's demo code
def __lowerCamelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
    # Bug fixes: the demo previously ran at import time (outside the guard)
    # and bound the tree to a placeholder name while calling ``t.insert``.
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 445
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _snake_case ( datasets.BeamBasedBuilder ):
    """Dummy beam builder with a single string feature.

    Bug fix: the degraded methods shared one name and repeated their
    parameter names (a SyntaxError); the `datasets` framework hook names
    (`_info`/`_split_generators`/`_build_pcollection`) are restored.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


DummyBeamDataset = _snake_case  # name used by the test methods
class _snake_case ( datasets.BeamBasedBuilder ):
    """Dummy beam builder with a nested (sequence-of-dict) feature.

    Same restoration as ``DummyBeamDataset``: real hook names and distinct
    parameter names.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


NestedBeamDataset = _snake_case  # name used by the test methods
def get_test_dummy_examples():
    """Three (key, example) pairs with a single string field."""
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


__lowerCamelCase = get_test_dummy_examples  # keep the degraded module-level name bound
def get_test_nested_examples():
    """Three (key, example) pairs with a nested single-element list field."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


__lowerCamelCase = get_test_nested_examples  # keep the degraded module-level name bound
class _snake_case ( TestCase ):
    """Integration tests for beam-based dataset builders.

    Bug fixes: the degraded class inherited from itself instead of
    ``TestCase``, all four test methods shared one name (only the last would
    survive), and the bodies referenced an undefined placeholder where the
    temp cache dir / expected counts belonged.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                # Force two shards so the sharded file layout is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00000-of-00002.arrow')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00001-of-00002.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train.arrow')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset


BeamBuilderTest = _snake_case  # readable alias for test discovery tooling
| 445
| 1
|
def check_cycle(graph) -> bool:
    """Return True if the directed *graph* (adjacency dict) contains a cycle.

    Bug fix: the degraded version passed the same placeholder for all four
    DFS arguments instead of (graph, node, visited, rec_stk).
    """
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


_lowercase = check_cycle  # keep the degraded module-level name bound
def depth_first_search(graph, vertex, visited, rec_stk) -> bool:
    """DFS cycle check: True if a back edge is reachable from *vertex*.

    Bug fix: the degraded signature repeated one placeholder name for all
    four parameters (a SyntaxError) and the body mixed them up.
    """
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge to a vertex still on the recursion stack => cycle.
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


_lowercase = depth_first_search  # keep the degraded module-level name bound
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 400
|
import doctest
from collections import deque
import numpy as np
class lowercase__ :
    """Circular convolution of two discrete signals via the circulant-matrix method."""

    def __init__(self):
        # Default example signals; circular_convolution() convolves these.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        """Return the circular convolution of the two signals, rounded to 2 dp.

        Bug fix: the degraded version passed placeholders everywhere
        (``max(UpperCAmelCase_, UpperCAmelCase_)``, ``rotate(UpperCAmelCase_)`` …),
        so lengths, rotations and the final matrix product were undefined.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # Row i of the matrix is the second signal rotated right by i.
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]

    # Backward-compatible alias for the previous (degraded) method name.
    A_ = circular_convolution
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 400
| 1
|
from __future__ import annotations
def A__ ( collection ) -> bool:
    """Return True if every element of *collection* is distinct.

    Bug fix: the degraded body referenced an undefined placeholder name
    instead of its own parameter.
    """
    # A set drops duplicates, so equal lengths mean all elements are unique.
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 305
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __UpperCAmelCase ( checkpoint , config ) -> Any:
    """Map a CompVis/LDM VAE state dict onto diffusers ``AutoencoderKL`` key names.

    Bug fix: the degraded signature repeated one parameter name (a
    SyntaxError) and every ``new_checkpoint[...] = …`` assignment target was
    lost.  NOTE(review): the target key names below are restored from the
    upstream converter — verify against
    ``diffusers.pipelines.stable_diffusion.convert_from_ckpt`` before use.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}
    # Stem/head convolutions and norms: tensors are reused, keys renamed.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict['encoder.norm_out.bias']
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict['decoder.norm_out.bias']
    new_checkpoint["quant_conv.weight"] = vae_state_dict['quant_conv.weight']
    new_checkpoint["quant_conv.bias"] = vae_state_dict['quant_conv.bias']
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f"down.{i}.block", 'new': f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f"up.{block_id}.block", 'new': f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


custom_convert_ldm_vae_checkpoint = __UpperCAmelCase  # name used by the conversion entry point
def __UpperCAmelCase ( vae_pt_path , dump_path , ) -> None:
    """Load a VAE ``.pt``/``.safetensors`` checkpoint and save it in diffusers format.

    Bug fix: the degraded signature repeated one parameter name (a
    SyntaxError) and the body mixed placeholders with the intended locals.
    """
    # Only support V1
    r = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml')
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 5_12
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if vae_pt_path.endswith('safetensors'):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(vae_pt_path, framework='pt', device='cpu') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(vae_pt_path, map_location=device)['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(dump_path)


vae_pt_to_vae_diffuser = __UpperCAmelCase  # name used by the __main__ guard
if __name__ == "__main__":
    # CLI entry point: convert a VAE .pt/.safetensors checkpoint to diffusers format.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    # help text fixed: this is the output directory, not the input checkpoint
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to output the converted VAE.')
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 560
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def lowercase__(iterable, size):
    """Yield successive tuples of at most ``size`` items from ``iterable``.

    The final chunk may be shorter; the generator stops when the iterable
    is exhausted.
    """
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


# Backward-compatible alias: the cipher routines below call this name.
chunker = lowercase__
def lowercase__(dirty):
    """Normalize text for Playfair encryption.

    Keeps ASCII letters only (uppercased), inserts 'X' between doubled
    letters, and pads with a trailing 'X' to make the length even.
    """
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair digraphs may not repeat a letter: split doubles with 'X'
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:  # odd length -> pad so every digraph is complete
        clean += "X"
    return clean


# Backward-compatible alias used by the encode/decode routines.
prepare_input = lowercase__
def lowercase__(key):
    """Build the 25-letter Playfair key table for ``key``.

    Returns a flat list of 25 letters: key letters first (deduplicated),
    then the remaining alphabet letters in order.
    """
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


# Backward-compatible alias used by the encode/decode routines.
generate_table = lowercase__
def lowercase__(plaintext, key):
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``.

    Digraph rules (https://en.wikipedia.org/wiki/Playfair_cipher#Description):
    same row -> shift right; same column -> shift down; otherwise swap columns
    (rectangle rule).
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            # same column: take the letter below (wrapping)
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext


# Backward-compatible alias.
encode = lowercase__
def lowercase__(ciphertext, key):
    """Decrypt Playfair ``ciphertext`` under ``key``.

    Inverse of the encode rules: same row -> shift left; same column ->
    shift up; rectangle -> swap columns. 'X' padding is not removed.
    """
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            # same row: take the letter to the left (wrapping)
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            # same column: take the letter above (wrapping)
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext


# Backward-compatible alias.
decode = lowercase__
| 708
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( UpperCamelCase_ ):
    # Unconditional image-generation pipeline driving a UNet with the
    # score-SDE variance-exploding sampler (predictor-corrector loop).
    #
    # NOTE(review): an automated rename collapsed distinct identifiers in this
    # class: both class attributes below share one name (the second `=42`
    # overwrites the first — upstream they were annotated unet/scheduler
    # fields), and every parameter/local is `_lowerCamelCase`/`snake_case__`.
    # Duplicate parameter names make the defs SyntaxErrors as written.
    _lowerCAmelCase =42
    _lowerCAmelCase =42

    def __init__( self : List[str] , _lowerCamelCase : UNetaDModel , _lowerCamelCase : ScoreSdeVeScheduler ):
        # Registers the UNet and the SDE-VE scheduler as pipeline modules.
        super().__init__()
        self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )

    @torch.no_grad()
    def __call__( self : Union[str, Any] , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 2_0_0_0 , _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCamelCase : Optional[str] = "pil" , _lowerCamelCase : bool = True , **_lowerCamelCase : Optional[int] , ):
        # Sampling loop: start from scaled Gaussian noise, then for each
        # timestep run `correct_steps` Langevin correction steps followed by
        # one reverse-SDE prediction step.
        snake_case__ : Union[str, Any] = self.unet.config.sample_size
        # NOTE(review): `batch_size`/`img_size` below are the mangled remains of
        # the two values bound just above — verify against upstream diffusers.
        snake_case__ : Tuple = (batch_size, 3, img_size, img_size)
        snake_case__ : List[str] = self.unet
        # initial sample ~ N(0, init_noise_sigma^2)
        snake_case__ : int = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase ) * self.scheduler.init_noise_sigma
        snake_case__ : Tuple = sample.to(self.device )
        self.scheduler.set_timesteps(_lowerCamelCase )
        self.scheduler.set_sigmas(_lowerCamelCase )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            # per-sample sigma for the current timestep
            snake_case__ : Union[str, Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                snake_case__ : Tuple = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
                snake_case__ : List[str] = self.scheduler.step_correct(_lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
            # prediction step
            # NOTE(review): `model` is presumably the unet bound earlier in this
            # method — the binding name was mangled away; confirm upstream.
            snake_case__ : Dict = model(_lowerCamelCase , _lowerCamelCase ).sample
            snake_case__ : Tuple = self.scheduler.step_pred(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
            snake_case__ , snake_case__ : str = output.prev_sample, output.prev_sample_mean
        # final image is the clamped posterior mean, converted to NHWC numpy
        snake_case__ : Optional[int] = sample_mean.clamp(0 , 1 )
        snake_case__ : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case__ : Optional[Any] = self.numpy_to_pil(_lowerCamelCase )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=_lowerCamelCase )
| 303
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# commit where the current branch forked off `main`
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# every file touched since the fork point
modified_files = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
# restrict to .py files under the requested top-level directories
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 611
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; the tokenizer methods below report save errors/warnings on it.
logger = logging.get_logger(__name__)
# File names the tokenizer reads/writes; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download locations of the pretrained CTRL vocabulary and BPE merges.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum input length (in tokens) of the pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 2_5_6,
}
# CTRL control codes: prompt prefixes the model was trained with, mapped to
# their vocabulary ids. Referenced by the tokenizer class below.
CONTROL_CODES = {
    "Pregnancy": 1_6_8_6_2_9,
    "Christianity": 7_6_7_5,
    "Explain": 1_0_6_4_2_3,
    "Fitness": 6_3_4_4_0,
    "Saving": 6_3_1_6_3,
    "Ask": 2_7_1_7_1,
    "Ass": 9_5_9_8_5,
    "Joke": 1_6_3_5_0_9,
    "Questions": 4_5_6_2_2,
    "Thoughts": 4_9_6_0_5,
    "Retail": 5_2_3_4_2,
    "Feminism": 1_6_4_3_3_8,
    "Writing": 1_1_9_9_2,
    "Atheism": 1_9_2_2_6_3,
    "Netflix": 4_8_6_1_6,
    "Computing": 3_9_6_3_9,
    "Opinion": 4_3_2_1_3,
    "Alone": 4_4_9_6_7,
    "Funny": 5_8_9_1_7,
    "Gaming": 4_0_3_5_8,
    "Human": 4_0_8_8,
    "India": 1_3_3_1,
    "Joker": 7_7_1_3_8,
    "Diet": 3_6_2_0_6,
    "Legal": 1_1_8_5_9,
    "Norman": 4_9_3_9,
    "Tip": 7_2_6_8_9,
    "Weight": 5_2_3_4_3,
    "Movies": 4_6_2_7_3,
    "Running": 2_3_4_2_5,
    "Science": 2_0_9_0,
    "Horror": 3_7_7_9_3,
    "Confession": 6_0_5_7_2,
    "Finance": 1_2_2_5_0,
    "Politics": 1_6_3_6_0,
    "Scary": 1_9_1_9_8_5,
    "Support": 1_2_6_5_4,
    "Technologies": 3_2_5_1_6,
    "Teenage": 6_6_1_6_0,
    "Event": 3_2_7_6_9,
    "Learned": 6_7_4_6_0,
    "Notion": 1_8_2_7_7_0,
    "Wikipedia": 3_7_5_8_3,
    "Books": 6_6_6_5,
    "Extract": 7_6_0_5_0,
    "Confessions": 1_0_2_7_0_1,
    "Conspiracy": 7_5_9_3_2,
    "Links": 6_3_6_7_4,
    "Narcissus": 1_5_0_4_2_5,
    "Relationship": 5_4_7_6_6,
    "Relationships": 1_3_4_7_9_6,
    "Reviews": 4_1_6_7_1,
    "News": 4_2_5_6,
    "Translation": 2_6_8_2_0,
    "multilingual": 1_2_8_4_0_6,
}
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase = char
__lowerCAmelCase = set(UpperCamelCase )
return pairs
class a ( __UpperCAmelCase ):
    # CTRL BPE tokenizer (vocab.json + merges.txt).
    #
    # NOTE(review): an automated rename damaged this class badly — every method
    # is named `UpperCAmelCase__` (later defs silently override earlier ones,
    # so only the last survives at runtime), the four class attributes all
    # share the name `lowercase_` (only the last assignment survives), locals
    # were collapsed to `__lowerCAmelCase`, and `__init__` repeats the
    # parameter name `snake_case__` (a SyntaxError as written). The structure
    # below mirrors the upstream CTRLTokenizer; verify against it before use.
    lowercase_ : str = VOCAB_FILES_NAMES
    lowercase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : Dict = CONTROL_CODES

    def __init__( self : Dict , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : List[Any]="<unk>" , **snake_case__ : Dict ):
        """Load the JSON vocabulary and the BPE merges file."""
        super().__init__(unk_token=snake_case__ , **snake_case__ )
        with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
            __lowerCAmelCase = json.load(snake_case__ )
        # reverse mapping: id -> token
        __lowerCAmelCase = {v: k for k, v in self.encoder.items()}
        with open(snake_case__ , encoding="utf-8" ) as merges_handle:
            # drop the "#version" header line and the trailing empty line
            __lowerCAmelCase = merges_handle.read().split("\n" )[1:-1]
        __lowerCAmelCase = [tuple(merge.split() ) for merge in merges]
        # merge pair -> rank (lower rank = applied earlier)
        __lowerCAmelCase = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
        # per-token BPE result cache
        __lowerCAmelCase = {}

    @property
    def UpperCAmelCase__ ( self : Any ):
        """Vocabulary size (number of entries in the encoder)."""
        return len(self.encoder )

    def UpperCAmelCase__ ( self : int ):
        """Full vocabulary including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[str] ):
        """Apply byte-pair encoding to a single token, with caching."""
        if token in self.cache:
            return self.cache[token]
        __lowerCAmelCase = tuple(snake_case__ )
        # mark the end of the word so merges can distinguish word-final symbols
        __lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __lowerCAmelCase = get_pairs(snake_case__ )
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (most frequent) pair first
            __lowerCAmelCase = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __lowerCAmelCase , __lowerCAmelCase = bigram
            __lowerCAmelCase = []
            __lowerCAmelCase = 0
            while i < len(snake_case__ ):
                try:
                    __lowerCAmelCase = word.index(snake_case__ , snake_case__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __lowerCAmelCase = j
                # merge the occurrence of (first, second) into one symbol
                if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __lowerCAmelCase = tuple(snake_case__ )
            __lowerCAmelCase = new_word
            if len(snake_case__ ) == 1:
                break
            else:
                __lowerCAmelCase = get_pairs(snake_case__ )
        # join sub-units with the CTRL continuation marker and drop "</w>"
        __lowerCAmelCase = "@@ ".join(snake_case__ )
        __lowerCAmelCase = word[:-4]
        __lowerCAmelCase = word
        return word

    def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Tuple ):
        """Tokenize a string: split on whitespace, then BPE each piece."""
        __lowerCAmelCase = []
        __lowerCAmelCase = re.findall(R"\S+\n?" , snake_case__ )
        for token in words:
            split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
        return split_tokens

    def UpperCAmelCase__ ( self : int , snake_case__ : List[str] ):
        """Convert a token string to its vocabulary id (unk if missing)."""
        return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )

    def UpperCAmelCase__ ( self : Any , snake_case__ : int ):
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(snake_case__ , self.unk_token )

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[str] ):
        """Join tokens into a string, removing the "@@ " continuation marks."""
        __lowerCAmelCase = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
        return out_string

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
        """Write vocab.json and merges.txt into a directory; returns both paths."""
        if not os.path.isdir(snake_case__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        __lowerCAmelCase = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __lowerCAmelCase = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(snake_case__ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
        __lowerCAmelCase = 0
        with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # NOTE(review): the lambda below references `kv` while its parameter
            # was renamed to `snake_case__` — a NameError as written; upstream
            # it is `lambda kv: kv[1]` (sort merges by rank).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
                if index != token_index:
                    # ranks must be consecutive for the file to round-trip
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    __lowerCAmelCase = token_index
                writer.write(" ".join(snake_case__ ) + "\n" )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 611
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Stop sequences for HumanEval generation: a completion ends as soon as a new
# top-level construct begins. Used by the stopping criteria and by
# remove_last_block() below.
lowerCAmelCase__ = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class lowercase ( a_ ):
    """Iterable dataset that tokenizes every HumanEval prompt up front and
    yields each one ``n_copies`` times (one item per requested completion)."""

    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1) -> Dict:
        self.tokenizer = tokenizer
        self.dataset = dataset
        # default: evaluate every task in the dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self) -> Union[str, Any]:
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        # batch-tokenize once with padding so all items share a shape
        outputs = self.tokenizer(prompts , padding=True , return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    # true (unpadded) prompt length for this task
                    "input_len": outputs.attention_mask[task].sum(),
                }
class lowercase ( a_ ):
    """Stopping criterion: halt generation once every sequence in the batch
    contains one of the end-of-function strings past the prompt."""

    def __init__( self , start_length , eof_strings , tokenizer) -> Optional[Any]:
        self.start_length = start_length  # prompt length; only look at generated part
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs) -> List[Any]:
        # decode only the newly generated tokens
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        # stop only when *all* sequences have hit a stop string
        return all(done)
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Dict:
    """Truncate a generated completion at the last end-of-function marker.

    Splits on the EOF stop strings (kept as delimiters) and drops the final
    delimiter plus whatever follows it.
    """
    string_list = re.split('(%s)' % '|'.join(lowerCAmelCase__) , UpperCamelCase )
    # last string should be ""
    return "".join(string_list[:-2] )


# Backward-compatible alias: complete_code() calls this name; bind it now,
# before SCREAMING_SNAKE_CASE is re-defined further down the file.
remove_last_block = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=2_0 , **gen_kwargs ) -> Any:
    """Generate code completions for every HumanEval task.

    Runs the model over ``dataloader`` (one prompt per batch, repeated
    ``n_copies`` times by the dataset), gathers generations across processes,
    and returns ``code_gens``: a list of length ``n_tasks`` whose entry ``i``
    is the list of decoded, EOF-truncated completions for task ``i``.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # stop-criterion must know where the prompt ends for this batch
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True)
            # strip everything after the completed function body
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


# Backward-compatible alias used by main() below.
complete_code = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE( ) -> Optional[Any]:
    """Entry point: evaluate a causal LM on OpenAI HumanEval with pass@k.

    NOTE(review): an automated rename collapsed every local to
    ``UpperCAmelCase_`` and every argument to ``__snake_case`` — later lines
    reference the *original* names (``parser``, ``args``, ``tokenizer``,
    ``accelerator``, ``human_eval`` ...), so as written this raises
    NameError. Comments below describe the intended upstream flow.
    """
    # parse HumanEvalArguments from the CLI
    UpperCAmelCase_ : Optional[int] = HfArgumentParser(__snake_case )
    UpperCAmelCase_ : Dict = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    UpperCAmelCase_ : Any = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    UpperCAmelCase_ : List[Any] = 'false'
    if args.num_workers is None:
        UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    UpperCAmelCase_ : Any = Accelerator()
    set_seed(args.seed ,device_specific=__snake_case )
    # Load model and tokenizer
    UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
    UpperCAmelCase_ : Tuple = tokenizer.eos_token
    UpperCAmelCase_ : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    UpperCAmelCase_ : Tuple = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 ,__snake_case ,__snake_case )] ),
    }
    # Load evaluation dataset and metric
    UpperCAmelCase_ : Union[str, Any] = load_dataset('openai_humaneval' )
    UpperCAmelCase_ : Any = load_metric('code_eval' )
    UpperCAmelCase_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    UpperCAmelCase_ : Optional[Any] = args.n_samples // args.batch_size
    UpperCAmelCase_ : Optional[Any] = TokenizedDataset(__snake_case ,human_eval['test'] ,n_copies=__snake_case ,n_tasks=__snake_case )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    UpperCAmelCase_ : Dict = DataLoader(__snake_case ,batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        UpperCAmelCase_ : Dict = code_eval_metric.compute(references=[''] ,predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception
    # distribute model and dataloader across processes
    UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.prepare(__snake_case ,__snake_case )
    UpperCAmelCase_ : Optional[Any] = complete_code(
        __snake_case ,__snake_case ,__snake_case ,__snake_case ,n_tasks=__snake_case ,batch_size=args.batch_size ,**__snake_case ,)
    if accelerator.is_main_process:
        # build the reference test harness for each task: unit tests + check() call
        UpperCAmelCase_ : List[Any] = []
        for task in tqdm(range(__snake_case ) ):
            UpperCAmelCase_ : List[Any] = human_eval['test'][task]['test']
            UpperCAmelCase_ : Optional[int] = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append('\n' + test_func + '\n' + entry_point )
        # Evaluate completions with "code_eval" metric
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = code_eval_metric.compute(
            references=__snake_case ,predictions=__snake_case ,num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file ,'w' ) as fp:
            json.dump(__snake_case ,__snake_case )

# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # The refactor renamed `main` to `SCREAMING_SNAKE_CASE` (the most recent
    # definition of that name above is the entry point); `main` is undefined.
    SCREAMING_SNAKE_CASE()
| 712
|
'''simple docstring'''
import numpy
class lowercase :
    # Fully-connected feed-forward network with two hidden layers (4 and 3
    # nodes), sigmoid activations, trained by hand-written backpropagation.
    #
    # NOTE(review): an automated rename collapsed this class's identifiers:
    # every method is named `_snake_case` (later defs override earlier ones,
    # so only the last — predict — survives at runtime) and the `self.` prefix
    # was stripped from attribute assignments, leaving dead local bindings.
    # Later code references the original attribute names
    # (`self.input_array`, `self.predicted_output`, ...), which are never set.
    def __init__( self , _snake_case , _snake_case) -> None:
        # intended: self.input_array = input_array
        UpperCAmelCase_ : Optional[Any] = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        UpperCAmelCase_ : Tuple = numpy.random.rand(
            self.input_array.shape[1] , 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        UpperCAmelCase_ : List[str] = numpy.random.rand(
            4 , 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        UpperCAmelCase_ : Dict = numpy.random.rand(3 , 1)
        # Real output values provided.
        UpperCAmelCase_ : str = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        UpperCAmelCase_ : Union[str, Any] = numpy.zeros(output_array.shape)

    def _snake_case ( self) -> numpy.ndarray:
        # Forward pass: input -> hidden1 -> hidden2 -> output, sigmoid at
        # every layer; returns the output-layer activations.
        UpperCAmelCase_ : Any = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        UpperCAmelCase_ : Tuple = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        UpperCAmelCase_ : int = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
        return self.layer_between_second_hidden_layer_and_output

    def _snake_case ( self) -> None:
        # Backpropagation: gradient of squared error w.r.t. each weight
        # matrix (chain rule through the sigmoid derivatives), then a plain
        # gradient step applied to all three weight matrices.
        UpperCAmelCase_ : Optional[int] = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output) , )
        UpperCAmelCase_ : Any = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
        UpperCAmelCase_ : Tuple = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> None:
        # Training loop: forward pass + backprop for `iterations` rounds,
        # optionally printing mean-squared-error loss each round.
        for iteration in range(1 , iterations + 1):
            UpperCAmelCase_ : int = self.feedforward()
            self.back_propagation()
            if give_loss:
                UpperCAmelCase_ : List[Any] = numpy.mean(numpy.square(output - self.feedforward()))
                print(F"""Iteration {iteration} Loss: {loss}""")

    def _snake_case ( self , _snake_case) -> int:
        # Predict: forward pass on a single input, thresholded at 0.6.
        UpperCAmelCase_ : Optional[int] = input_arr
        UpperCAmelCase_ : Tuple = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights))
        UpperCAmelCase_ : Optional[int] = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
        UpperCAmelCase_ : int = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray:
    """Logistic sigmoid: 1 / (1 + e^-x), applied elementwise."""
    # original body referenced `value`, which the rename left undefined
    return 1 / (1 + numpy.exp(-UpperCamelCase))


# Readable alias; the network class above calls `sigmoid`.
sigmoid = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray:
    """Derivative of the sigmoid expressed in terms of its *output* value:
    s'(x) = s(x) * (1 - s(x))."""
    # original body referenced `value`, which the rename left undefined
    return (UpperCamelCase) * (1 - (UpperCamelCase))


# Readable alias; the network class above calls `sigmoid_derivative`.
sigmoid_derivative = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE( ) -> int:
    """Demo: train the two-hidden-layer network on 3-input XOR-like data and
    predict the class of input (1, 1, 1).

    NOTE(review): the rename broke this function — it instantiates
    ``TwoHiddenLayerNeuralNetwork`` (the class above is now named
    ``lowercase``) and passes ``UpperCamelCase`` where the local array names
    used to be, so as written it raises NameError. Comments describe intent.
    """
    # training inputs: all 3-bit combinations
    UpperCAmelCase_ : Optional[int] = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) ,dtype=numpy.floataa ,)
    # True output values for the given input values.
    UpperCAmelCase_ : Dict = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa )
    # Calling neural network class.
    UpperCAmelCase_ : List[str] = TwoHiddenLayerNeuralNetwork(
        input_array=UpperCamelCase ,output_array=UpperCamelCase )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=UpperCamelCase ,iterations=1_0 ,give_loss=UpperCamelCase )
    return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) )
if __name__ == "__main__":
    # The refactor renamed `example` to `SCREAMING_SNAKE_CASE` (the most
    # recent definition of that name above); `example` is undefined.
    SCREAMING_SNAKE_CASE()
| 471
| 0
|
import qiskit
def A(qubits, classical_bits):
    """Measure qubit 0 of a fresh circuit on the Aer simulator.

    Returns the measurement histogram (counts dict) over 1000 shots.
    """
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


# Backward-compatible, readable alias; the __main__ guard below calls this name.
single_qubit_measure = A
if __name__ == "__main__":
    # Run a trivial 1-qubit, 1-classical-bit measurement and print the counts.
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 248
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
    """Test helper that builds small UperNet-on-ConvNext configs and inputs.

    NOTE(review): the rename collapsed all five methods to the name
    ``lowercase`` — later defs override earlier ones, so only the last
    (prepare_config_and_inputs_for_common) survives at runtime. Upstream
    these were prepare_config_and_inputs / get_backbone_config / get_config /
    create_and_check_for_semantic_segmentation / prepare_config_and_inputs_for_common.
    """

    def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict=1_3 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : str=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Tuple=[2, 2, 3, 2] , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]=3_7 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : List[Any]=1_0 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Dict=["stage2", "stage3", "stage4"] , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=None , ) -> int:
        # Store the (small) test hyperparameters on the tester instance.
        # NOTE(review): all parameters were renamed to `lowerCAmelCase_`
        # (duplicate parameter names — a SyntaxError as written); the
        # assignments below use the original upstream attribute names.
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = image_size
        __lowerCAmelCase = num_channels
        __lowerCAmelCase = num_stages
        __lowerCAmelCase = hidden_sizes
        __lowerCAmelCase = depths
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = out_features
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = scope
        __lowerCAmelCase = num_stages

    def lowercase ( self : Dict ) -> List[str]:
        # Build random pixel values (and labels when use_labels) plus a config.
        __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowerCAmelCase = self.get_config()
        return config, pixel_values, labels

    def lowercase ( self : List[str] ) -> Union[str, Any]:
        # ConvNext backbone config mirroring the tester's hyperparameters.
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def lowercase ( self : Dict ) -> List[str]:
        # Full UperNet config wrapping the backbone config above.
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCAmelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCAmelCase_ , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )

    def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
        # Instantiate the segmentation model, run a forward pass, and check
        # that the logits have shape (batch, num_labels, H, W).
        __lowerCAmelCase = UperNetForSemanticSegmentation(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __lowerCAmelCase = model(lowerCAmelCase_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        # Adapt prepare_config_and_inputs() output to the common-test format.
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs
        __lowerCAmelCase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for `UperNetForSemanticSegmentation`.

    NOTE(review): the original declared the same (obfuscated) base class twice,
    which raises TypeError at class creation; the conventional
    ModelTesterMixin / PipelineTesterMixin pair is assumed here — confirm
    against this fragment's imports.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    # UperNet skips several generic test families:
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        # Tiny-config builder plus the generic config serialization tester.
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        # Full battery of generic config round-trip / init checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # UperNetConfig has no common properties to verify.
        return

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # one hidden state per stage plus the stem output
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Zero the init ranges on the main config AND the nested backbone config
        # so every trainable parameter mean is exactly 0.0 or 1.0.
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip(reason='UperNet does not have tied weights')
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the ADE20k validation fixture image and return it as an RGB PIL image.

    Renamed from the mangled ``a_`` — the integration tests below call
    ``prepare_img()``. Also fixes the NameError: the downloaded path was
    bound to one mangled name but a different undefined name was opened.
    """
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg')
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Integration tests: run real UperNet checkpoints on the fixture image
    and compare logits against reference slices."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        # Logits are upsampled back to the 512x512 processed image size.
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 53
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Optional[Any] = {}
class LlamaConfig(PretrainedConfig):
    """Configuration for LLaMA models (mirrors `transformers.LlamaConfig`).

    Stores the architecture hyper-parameters and validates the optional
    RoPE-scaling settings. The original had every ``__init__`` parameter
    named identically (a SyntaxError) and both class attributes bound to one
    name; canonical names are restored from the attribute assignments below.
    """

    # name under which PretrainedConfig registers this architecture
    model_type = 'llama'
    # cache entries the generation utilities must ignore as model inputs
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        """Create the config; ``rope_scaling`` is validated immediately."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a 2-key dict with `type` in
        {"linear", "dynamic"} and a float `factor` > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            # Message fixed: the code reads keys `type` and `factor`
            # (the original said "with with ... `name` and `factor`").
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
| 713
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs/inputs and runs per-model shape checks.

    Renamed from the mangled ``__UpperCAmelCase`` — the test class below
    instantiates ``OpenAIGPTModelTester(self)`` and calls the
    ``create_and_check_*`` methods by their canonical names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # last vocab id doubles as the padding token
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return a tiny config plus random input tensors/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Forward the base model with/without optional inputs; check shape."""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """LM head: loss is scalar, logits cover the full vocabulary."""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Double-heads model: same LM loss/logits shape checks."""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        """Sequence classification head produces one logit row per label."""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the common ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + generation + pipeline tests for the OpenAI GPT family.

    NOTE(review): the original listed one mangled base name three times
    (duplicate bases raise TypeError); the three mixins imported at the top
    of this fragment are restored here.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip pipeline combinations that cannot work with this model.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment the generic inputs with the label tensors the
        double-heads model expects (labels, mc_token_ids, mc_labels)."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                # input_ids / token_type_ids reuse the zeroed labels tensor
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the pretrained
    'openai-gpt' checkpoint must reproduce a known token sequence."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        # do_sample=False => deterministic greedy decoding
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 542
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a
    single processor object (mirrors `transformers.ChineseCLIPProcessor`)."""

    # names ProcessorMixin uses to wire up the two sub-processors
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as a fallback.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the text encoding (with `pixel_values` merged in when images
        are also given), or a BatchEncoding of the image features alone.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # merge the image features into the text encoding (the original
            # discarded this assignment, so pixel_values were silently lost)
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of tokenizer and image-processor input names, order-preserving
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 685
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias of `DonutImageProcessor`.

    Emits a FutureWarning on construction, then behaves exactly like the
    image processor. (The original `*a, **a` signature was a SyntaxError —
    duplicate parameter names.)
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def snake_case_ ( __lowercase , __lowercase=0.9_9_9 , __lowercase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowercase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowercase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
UpperCAmelCase_ : str = []
for i in range(__lowercase ):
UpperCAmelCase_ : str = i / num_diffusion_timesteps
UpperCAmelCase_ : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowercase ) / alpha_bar_fn(__lowercase ) , __lowercase ) )
return torch.tensor(__lowercase , dtype=torch.floataa )
class lowerCAmelCase__( snake_case__ , snake_case__ ):
    '''Second-order Karras-style discrete scheduler with interpolated sigmas
    (structure matches diffusers' KDPM2DiscreteScheduler).

    NOTE(review): identifiers in this class look machine-mangled — every
    assignment target is `UpperCAmelCase_` and every parameter is
    `__snake_case` (duplicate parameter names in one signature are a
    SyntaxError), while the bodies still reference what are presumably the
    original names (`trained_betas`, `num_inference_steps`, `sample`, ...).
    `torch.floataa` / `np.floataa` look like mangled `float32`. Restore the
    real names before this class can run — confirm against diffusers.
    '''
    # NOTE(review): both class attributes are bound to the same mangled name
    # `A_`; in diffusers these are `_compatibles` and `order` — confirm.
    A_ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
    A_ : int = 2
    @register_to_config
    def __init__( self : Any , __snake_case : int = 1_000 , __snake_case : float = 0.00_085 , __snake_case : float = 0.012 , __snake_case : str = "linear" , __snake_case : Optional[Union[np.ndarray, List[float]]] = None , __snake_case : str = "epsilon" , __snake_case : str = "linspace" , __snake_case : int = 0 , ):
        '''Build the beta schedule (trained / linear / scaled-linear / cosine),
        derive cumulative alphas, then initialize timesteps and sigmas.'''
        if trained_betas is not None:
            UpperCAmelCase_ : Optional[Any] = torch.tensor(__snake_case , dtype=torch.floataa )
        elif beta_schedule == "linear":
            UpperCAmelCase_ : Dict = torch.linspace(__snake_case , __snake_case , __snake_case , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            UpperCAmelCase_ : List[str] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __snake_case , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            UpperCAmelCase_ : Optional[Any] = betas_for_alpha_bar(__snake_case )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
        UpperCAmelCase_ : Tuple = 1.0 - self.betas
        UpperCAmelCase_ : int = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(__snake_case , __snake_case , __snake_case )
    def _lowerCamelCase ( self : Dict , __snake_case : Optional[Any] , __snake_case : List[Any]=None ):
        '''Map a timestep value to its index in the current sigma schedule
        (index_for_timestep in diffusers).'''
        if schedule_timesteps is None:
            UpperCAmelCase_ : Any = self.timesteps
        UpperCAmelCase_ : Optional[Any] = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            UpperCAmelCase_ : Optional[int] = 1 if len(__snake_case ) > 1 else 0
        else:
            UpperCAmelCase_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(__snake_case ) else timestep
            UpperCAmelCase_ : str = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def _lowerCamelCase ( self : Dict ):
        '''Sigma used to scale the initial noise (init_noise_sigma).'''
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def _lowerCamelCase ( self : Optional[Any] , __snake_case : torch.FloatTensor , __snake_case : Union[float, torch.FloatTensor] , ):
        '''Scale the model input by 1 / sqrt(sigma^2 + 1) for the current
        step (scale_model_input).'''
        UpperCAmelCase_ : str = self.index_for_timestep(__snake_case )
        if self.state_in_first_order:
            UpperCAmelCase_ : str = self.sigmas[step_index]
        else:
            UpperCAmelCase_ : Optional[int] = self.sigmas_interpol[step_index]
        UpperCAmelCase_ : Any = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Union[str, torch.device] = None , __snake_case : Optional[int] = None , ):
        '''Compute the discrete timesteps, sigmas, and the log-interpolated
        sigma grid used by the second-order step (set_timesteps).'''
        UpperCAmelCase_ : Optional[int] = num_inference_steps
        UpperCAmelCase_ : List[str] = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            UpperCAmelCase_ : Tuple = np.linspace(0 , num_train_timesteps - 1 , __snake_case , dtype=__snake_case )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            UpperCAmelCase_ : Union[str, Any] = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            UpperCAmelCase_ : Tuple = (np.arange(0 , __snake_case ) * step_ratio).round()[::-1].copy().astype(__snake_case )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            UpperCAmelCase_ : Optional[Any] = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            UpperCAmelCase_ : List[str] = (np.arange(__snake_case , 0 , -step_ratio )).round().copy().astype(__snake_case )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        # sigma(t) = sqrt((1 - abar_t) / abar_t)
        UpperCAmelCase_ : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        UpperCAmelCase_ : Dict = torch.from_numpy(np.log(__snake_case ) ).to(__snake_case )
        UpperCAmelCase_ : Optional[Any] = np.interp(__snake_case , np.arange(0 , len(__snake_case ) ) , __snake_case )
        UpperCAmelCase_ : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        UpperCAmelCase_ : int = torch.from_numpy(__snake_case ).to(device=__snake_case )
        # interpolate sigmas
        UpperCAmelCase_ : Optional[int] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        UpperCAmelCase_ : str = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        UpperCAmelCase_ : Optional[int] = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(__snake_case ).startswith('''mps''' ):
            # mps does not support float64
            UpperCAmelCase_ : Union[str, Any] = torch.from_numpy(__snake_case ).to(__snake_case , dtype=torch.floataa )
        else:
            UpperCAmelCase_ : str = torch.from_numpy(__snake_case ).to(__snake_case )
        # interpolate timesteps
        UpperCAmelCase_ : int = self.sigma_to_t(__snake_case ).to(__snake_case , dtype=timesteps.dtype )
        UpperCAmelCase_ : Dict = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        UpperCAmelCase_ : Any = torch.cat([timesteps[:1], interleaved_timesteps] )
        # clearing the 2nd-order state (self.sample) — presumably
        UpperCAmelCase_ : List[Any] = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        UpperCAmelCase_ : str = defaultdict(__snake_case )
    def _lowerCamelCase ( self : int , __snake_case : str ):
        '''Invert sigma back to a (fractional) timestep by piecewise-linear
        interpolation in log-sigma space (sigma_to_t).'''
        # get log sigma
        UpperCAmelCase_ : Optional[Any] = sigma.log()
        # get distribution
        UpperCAmelCase_ : List[Any] = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        UpperCAmelCase_ : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        UpperCAmelCase_ : Optional[int] = low_idx + 1
        UpperCAmelCase_ : Any = self.log_sigmas[low_idx]
        UpperCAmelCase_ : Union[str, Any] = self.log_sigmas[high_idx]
        # interpolate sigmas
        UpperCAmelCase_ : Optional[int] = (low - log_sigma) / (low - high)
        UpperCAmelCase_ : Union[str, Any] = w.clamp(0 , 1 )
        # transform interpolation to time range
        UpperCAmelCase_ : int = (1 - w) * low_idx + w * high_idx
        UpperCAmelCase_ : Union[str, Any] = t.view(sigma.shape )
        return t
    @property
    def _lowerCamelCase ( self : Any ):
        '''True while the scheduler is in the first-order half of a step
        (no stored sample yet) — state_in_first_order.'''
        return self.sample is None
    def _lowerCamelCase ( self : Any , __snake_case : Union[torch.FloatTensor, np.ndarray] , __snake_case : Union[float, torch.FloatTensor] , __snake_case : Union[torch.FloatTensor, np.ndarray] , __snake_case : bool = True , ):
        '''One denoising step of the two-stage (DPM-Solver-2-like) update;
        alternates between a first-order and a second-order sub-step.'''
        UpperCAmelCase_ : Any = self.index_for_timestep(__snake_case )
        # advance index counter by 1
        UpperCAmelCase_ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(__snake_case ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            UpperCAmelCase_ : Optional[Any] = self.sigmas[step_index]
            UpperCAmelCase_ : Optional[Any] = self.sigmas_interpol[step_index + 1]
            UpperCAmelCase_ : int = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            UpperCAmelCase_ : Dict = self.sigmas[step_index - 1]
            UpperCAmelCase_ : Dict = self.sigmas_interpol[step_index]
            UpperCAmelCase_ : Union[str, Any] = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        UpperCAmelCase_ : Optional[int] = 0
        UpperCAmelCase_ : Tuple = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            UpperCAmelCase_ : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
            UpperCAmelCase_ : Optional[int] = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            UpperCAmelCase_ : Any = sigma_hat if self.state_in_first_order else sigma_interpol
            UpperCAmelCase_ : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('''prediction_type not implemented yet: sample''' )
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            UpperCAmelCase_ : Optional[Any] = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            UpperCAmelCase_ : List[Any] = sigma_interpol - sigma_hat
            # store for 2nd order step
            UpperCAmelCase_ : List[Any] = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            UpperCAmelCase_ : str = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            UpperCAmelCase_ : List[str] = sigma_next - sigma_hat
            UpperCAmelCase_ : Union[str, Any] = self.sample
            UpperCAmelCase_ : List[str] = None
        UpperCAmelCase_ : List[str] = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__snake_case )
    def _lowerCamelCase ( self : Union[str, Any] , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , ):
        '''Add noise to clean samples at the given timesteps:
        noisy = original + noise * sigma(t) (add_noise).'''
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        UpperCAmelCase_ : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(__snake_case ):
            # mps does not support float64
            UpperCAmelCase_ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            UpperCAmelCase_ : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            UpperCAmelCase_ : Tuple = self.timesteps.to(original_samples.device )
            UpperCAmelCase_ : List[str] = timesteps.to(original_samples.device )
        UpperCAmelCase_ : Tuple = [self.index_for_timestep(__snake_case , __snake_case ) for t in timesteps]
        UpperCAmelCase_ : int = sigmas[step_indices].flatten()
        # broadcast sigma to the sample's rank before scaling the noise
        while len(sigma.shape ) < len(original_samples.shape ):
            UpperCAmelCase_ : Any = sigma.unsqueeze(-1 )
        UpperCAmelCase_ : Dict = original_samples + noise * sigma
        return noisy_samples
    def __len__( self : Dict ):
        '''Number of training timesteps configured for this scheduler.'''
        return self.config.num_train_timesteps
| 641
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """Builds a tiny XLM config plus random inputs and checks output shapes for every XLM head.

    NOTE(review): the obfuscated original reused a single name for every parameter in
    each signature (a SyntaxError) and named every method `_lowerCamelCase` (shadowing).
    Names are restored from the attribute assignments and from the call sites in the
    test class below (e.g. `self.model_tester.prepare_config_and_inputs()`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        # Parameter names recovered from the attribute assignments below; defaults kept
        # in the original positional order.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        # Fix: choice_labels was unbound (UnboundLocalError) when use_labels=False.
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build the small XLMConfig used by all checks."""
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        # With full supervision the model returns only the total loss.
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate every input across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for the XLM model family.

    NOTE(review): the obfuscated original left the mixin bases as the undefined name
    `snake_case__` and shadowed every method under `_lowerCamelCase`. Bases are restored
    from the otherwise-unused imports at the top of the file; attribute and `test_*`
    names follow the mixin/unittest contract.
    """

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                # NOTE(review): the obfuscated original dropped the assignment targets;
                # start/end positions are the labels XLMForQuestionAnswering expects — confirm.
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        # One tuple of per-layer attention tensors per generated token.
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        # One tuple of per-layer hidden states per generated token.
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: generation with the pretrained xlm-mlm-en-2048 checkpoint."""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        # Fix: the obfuscated original passed an undefined name here; `torch_device`
        # (imported at the top of the file) is the target device.
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        # do_sample=False (greedy) so the output is deterministic and comparable to the fixed expectation.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 641
| 1
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from `state_dict` in place.

    Name restored from the call site in the conversion function below. Missing keys
    are ignored (`pop` with a `None` default) — the obfuscated original passed the
    dict itself as the default, which silently returned the whole dict.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Create a bias-free linear layer sharing the embedding's weight tensor.

    Name restored from the call site in the conversion function below. Fix: the
    obfuscated original passed the embedding module itself as the `bias` flag
    (truthy → a bias was created); a tied LM head has no bias, so `bias=False`.
    The subsequent `.data` assignment replaces the weight tensor wholesale.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Load a fairseq mBART checkpoint and return an equivalent MBartForConditionalGeneration.

    Function name and keyword names (`hf_config_path`, `finetuned`, `mbart_aa`) are
    restored from the `__main__` call site. The obfuscated original reused one name
    for all parameters (a SyntaxError) and dropped two assignment targets, restored
    here — NOTE(review): the `activation_function` and `shared.weight` targets follow
    the upstream conversion script; confirm against it.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # Fine-tuned mBART-50 checkpoints use ReLU activations.
        mbart_config.activation_function = "relu"
    # Tie the shared embedding to the decoder's embedding weights.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # Fix: the obfuscated original bound the parser/args to one name (`A`) while the
    # following lines referenced `parser` and `args` (NameError), and read the
    # nonexistent `args.mbart_aa` instead of the `--mbart_50` flag it defines.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 475
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users to the new import path;
# standard_warn=False keeps diffusers' custom warning format, stacklevel=3 attributes
# the warning to the caller's import statement.
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 475
| 1
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_lowerCAmelCase : List[str] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_lowerCAmelCase : Any = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowerCAmelCase : Any = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowerCAmelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_lowerCAmelCase : Any = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its words (e.g. "XLMRoberta" -> ["XLM", "Roberta"]).

    Name restored from the call site in `get_frameworks_table`.
    """
    # Lazily match up to a lower->Upper or UPPER->UpperLower boundary, or end of string.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Build a DataFrame mapping each model type to its PT/TF/Flax support and processor class.

    Name restored from the call site in `update_metadata`. Fix: the obfuscated
    original passed an undefined name as the defaultdict factory; the flags stored
    are booleans, so the factory is `bool` (default False).
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # "BertConfig" -> prefix "Bert" -> model type "bert"
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type has a processor entry.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update `table` (model class -> (pipeline_tag, auto_class)) from the auto mappings.

    Name restored from the call site in `update_metadata`.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    # Some mappings hold tuples of several model classes.
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Regenerate the frameworks/pipeline-tags tables and push them to the metadata dataset.

    Args:
        token: Hub token used for `hf_hub_download` and `upload_folder`.
        commit_sha: transformers commit this update corresponds to (used in the commit
            message), or None for a generic message.

    Name restored from the `__main__` call site.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Raise if any pipeline task lacks an entry in PIPELINE_TAGS_AND_AUTO_MODELS.

    Name restored from the `__main__` call site. The missing entries collected are
    the pipeline tags (keys), matching the error message below.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    # Fix: the obfuscated original assigned the parser and parsed args to `_lowerCAmelCase`
    # while the following lines referenced `parser` and `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 364
|
from ..utils import DummyObject, requires_backends
class _UpperCamelCase(metaclass=DummyObject):
    # Placeholder that raises a helpful error when flax is not installed.
    # Fix: the metaclass was the undefined name `lowerCAmelCase`; `DummyObject` is the
    # only imported candidate. NOTE(review): `_backends` and the `from_config` /
    # `from_pretrained` classmethod names follow the diffusers dummy-object
    # convention — confirm against `..utils.DummyObject`.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase(metaclass=DummyObject):
    # Flax-backend placeholder; see the note on the first dummy class: metaclass fixed
    # from undefined `lowerCAmelCase` to the imported `DummyObject`.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class _UpperCamelCase ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when Flax is not installed."""

    # Backend list consumed by the `DummyObject` metaclass / `requires_backends`.
    # Fix: metaclass was `lowerCAmelCase` (undefined); `DummyObject` is imported above.
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 364
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds `_lowercase`, clobbering the logger
# created on the previous line. The names look machine-mangled; presumably these
# were two distinct module-level names (e.g. a `logger` and a
# *_PRETRAINED_CONFIG_ARCHIVE_MAP) — confirm against the original module before renaming.
_lowercase = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ ( PretrainedConfig ):
    """Configuration for a BigBird model.

    Fixes over the previous version: every ``__init__`` parameter was named
    ``__A`` (duplicate argument names are a SyntaxError), and the base class
    ``__UpperCAmelCase`` was undefined while ``PretrainedConfig`` was imported
    and unused. Parameter order and defaults are unchanged.
    """

    # Model-type identifier consumed by the `PretrainedConfig` machinery.
    model_type = '''big_bird'''

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        # "block_sparse" or "original_full" — presumably validated downstream; confirm.
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class lowercase_ ( OnnxConfig ):
    """ONNX export configuration for BigBird.

    Fixes: the dynamic-axis dict was assigned to a mangled name but read as
    ``dynamic_axis`` (NameError), and the base class was the undefined
    ``__UpperCAmelCase`` while ``OnnxConfig`` was imported and unused. The
    property is named ``inputs``, the hook ``OnnxConfig`` subclasses override.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each ONNX input name to its dynamic axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 443
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
    """Tests for `CLIPProcessor`: save/load round-trips plus delegation to the
    tokenizer and image processor.

    Fixes over the previous version: all methods were named ``__lowercase`` (so
    they shadowed each other and ``setUp``/``tearDown``/``test_*`` never ran),
    fixtures were assigned to a local ``a__`` while the code read ``self.*``
    attributes and other undefined names, ``np.uinta`` is not a dtype, and
    ``do_normalize=lowercase`` referenced an undefined name.
    """

    def setUp(self):
        """Write a tiny CLIP vocab/merges pair and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (converted from CHW uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 302
| 0
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def lowercase__(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    """Dijkstra shortest path on a 0/1 grid (cells equal to 1 are passable).

    Every step costs 1; diagonal moves are allowed when `allow_diagonal` is
    True. Returns `(distance, path)` where `path` runs from `source` to
    `destination` inclusive, or `(np.inf, [])` when unreachable.

    Fixes over the previous version: duplicate parameter names and mangled
    locals (`rows`, `dx`, `queue`, `matrix`, ... were never actually bound).
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    # matrix holds the best-known distance to each cell; predecessors the parent.
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Relax the edge only for passable cells that improve the distance.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 477
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds `__SCREAMING_SNAKE_CASE`, clobbering
# the logger created on the previous line. The names look machine-mangled;
# presumably these were two distinct names (a `logger` and a pretrained-config
# archive map) — confirm against the original module before renaming.
__SCREAMING_SNAKE_CASE ={
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase ( PretrainedConfig ):
    """Configuration for a Wav2Vec2 model (conv feature extractor + transformer encoder).

    Fixes over the previous version: every ``__init__`` parameter was named
    ``__UpperCamelCase`` (duplicate argument names are a SyntaxError) and the
    base class ``lowercase_`` was undefined while ``PretrainedConfig`` was
    imported and unused. Parameter order and defaults are unchanged.
    """

    # Model-type identifier consumed by the `PretrainedConfig` machinery.
    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self) -> int:
        """Product of the conv strides: audio samples consumed per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 477
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
snake_case : Optional[Any] = logging.get_logger(__name__)
# NOTE(review): every assignment below rebinds the same mangled name `snake_case`,
# so only the final value (the checkpoint list) survives at runtime. The inline
# comments indicate these were originally distinct constants (logger, docstring
# config/checkpoint names, expected output shape, checkpoint archive list) —
# confirm against the original module before renaming.
# General docstring
snake_case : str = '''RegNetConfig'''
# Base docstring
snake_case : Union[str, Any] = '''facebook/regnet-y-040'''
snake_case : List[Any] = [1, 10_88, 7, 7]
# Image classification docstring
snake_case : Tuple = '''facebook/regnet-y-040'''
snake_case : Tuple = '''tabby, tabby cat'''
snake_case : Union[str, Any] = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Explicit pad -> Conv2D -> BatchNorm -> activation block.

    Fixes: the class was named ``_snake_case`` although other classes in this
    file reference ``TFRegNetConvLayer``; sublayers were bound to a local ``a``
    while ``call`` read ``self.*``; ``ZeroPaddingaD``/``ConvaD`` are not TF
    names; the keras hook must be named ``call``; duplicate parameter names
    were a SyntaxError.
    """

    def __init__(
        self,
        out_channels,
        kernel_size=3,
        stride=1,
        groups=1,
        activation="relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding='VALID',
            groups=groups,
            use_bias=False,
            name='convolution',
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates the channel count and embeds NCHW pixel values.

    Fixes: class renamed from the mangled ``_snake_case`` to the name the rest
    of the file references; attributes assigned to ``self`` instead of a local;
    keras hook renamed to ``call``.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name='embedder',
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided conv + batch norm used to project residual shortcuts.

    Fixes: class renamed from ``_snake_case`` to the referenced name; sublayers
    assigned to ``self``; ``ConvaD`` -> ``Conv2D``; keras hook renamed ``call``.
    """

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation: global pool, two 1x1 convs, channel-wise rescale.

    Fixes: class renamed from ``_snake_case``; sublayers assigned to ``self``;
    ``GlobalAveragePoolingaD``/``ConvaD`` -> real TF names; keras hook ``call``;
    intermediate values were all bound to the same local ``a``.
    """

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual block: 1x1 -> grouped 3x3 -> 1x1 convs plus shortcut.

    Fixes: class renamed from ``_snake_case`` to the name ``TFRegNetStage``
    references; locals assigned to ``self``; keras hook ``call``; duplicate
    parameter names removed.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y residual block: like the X block with a squeeze-excite layer.

    Fixes: class renamed from ``_snake_case``; locals assigned to ``self``;
    keras hook ``call``; duplicate parameter names removed.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A stage of `depth` residual blocks; the first block downsamples.

    Fixes: class renamed from ``_snake_case`` to the name ``TFRegNetEncoder``
    references; the layer list assigned to ``self.layers``; keras hook ``call``;
    duplicate parameter names removed.
    """

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='''layers.0'''),
            *[layer(config, out_channels, out_channels, name=F'''layers.{i+1}''') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects per-stage hidden states.

    Fixes: class renamed from ``_snake_case`` to the name the main layer
    references; the stage-size zip assigned to a usable local; keras hook
    ``call``; loop results threaded through ``hidden_state``/``hidden_states``
    instead of a single mangled local.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name='''stages.0''',
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'''stages.{i+1}'''))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; converts outputs back to NCHW.

    Fixes: class renamed from ``_snake_case``; ``config_class`` attribute name
    restored (required by ``@keras_serializable``); sublayers assigned to
    ``self``; keras hook ``call``; ``hidden_states`` was read in the return but
    never assigned.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='''embedder''')
        self.encoder = TFRegNetEncoder(config, name='''encoder''')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='''pooler''')

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class _snake_case(_snake_case):
    """RegNet pretrained-model scaffolding: config class, weight prefix, input signature.

    The three class attributes below all shared one obfuscated name (each
    overwriting the previous); restored to the canonical HF names. `tf.floataa`
    does not exist in TensorFlow — replaced with `tf.float32`.
    """

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'

    @property
    def input_signature(self):
        """Serving signature: NCHW float32 pixel values with a free batch dimension."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# Documentation templates consumed by the `@add_start_docstrings*` decorators below.
# Both constants were assigned to the same obfuscated name (the second shadowing
# the first); restored to the canonical HF names.
REGNET_START_DOCSTRING = R'''
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''

REGNET_INPUTS_DOCSTRING = R'''
    Args:
    pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
        Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
        [`ConveNextImageProcessor.__call__`] for details.
    output_hidden_states (`bool`, *optional*):
        Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
        more detail.
    return_dict (`bool`, *optional*):
        Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(_snake_case):
    """Bare TF RegNet model wrapping TFRegNetMainLayer.

    Fixes the duplicate `*args`/`**kwargs` parameter names (a SyntaxError) and the
    obfuscated decorator arguments. Base class kept as the preceding obfuscated
    pretrained-model class.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(_snake_case):
    """RegNet with a linear image-classification head.

    Fixes duplicate parameter names (SyntaxError) and the duplicate base class
    (the obfuscated version listed the same class twice, a TypeError).
    NOTE(review): the original presumably also mixed in a TF sequence
    classification loss class providing `hf_compute_loss` — confirm upstream.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 445
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a single flat `content` string feature.

    Renamed from `_snake_case`: the test class below instantiates
    `DummyBeamDataset(...)` by this name. Fixes duplicate parameter names
    (SyntaxError), the three colliding method names (canonical builder hooks
    restored), and the undefined `supervised_keys` argument (now None).
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested `a.b` sequence-of-strings feature.

    Renamed from `_snake_case`: the test class below instantiates
    `NestedBeamDataset(...)` by this name. Fixes duplicate parameter names,
    colliding method names, and the undefined `supervised_keys` argument.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset.

    Renamed from `__lowerCamelCase`: the builders and tests call
    `get_test_dummy_examples()`; the obfuscated name was also immediately
    shadowed by the next function.
    """
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset.

    Renamed from `__lowerCamelCase`: the builders and tests call
    `get_test_nested_examples()`.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for Beam-based dataset builders using the DirectRunner.

    Fixes: the four test methods all shared one obfuscated name so only the last
    survived; locals (`builder`, `dset`, `tmp_cache_dir`, `expected_num_examples`)
    were undefined; the base class was a builder instead of TestCase; the sharded
    test checked shard 00000 twice instead of 00000 and 00001.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train.arrow')
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json'))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # keep a handle on the real writer so the mock can delegate to it
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                # force two shards to exercise the sharded output path
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00000-of-00002.arrow'
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00001-of-00002.arrow'
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json'))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train.arrow')
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json'))
            )
            del dset
| 445
| 1
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place with odd-even transposition sort and return it.

    Renamed from `__SCREAMING_SNAKE_CASE`: the `__main__` block calls
    `odd_even_sort(...)`. Fixes the undefined `is_sorted` flag and the swap,
    which previously assigned to throwaway locals instead of the list slots.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    # Read whitespace-separated integers from stdin, sort them, print the result.
    # Fixed: the obfuscated locals never matched the names used below.
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into one processor.

    Reconstructed from the obfuscated version, which had every parameter named
    `a` (a SyntaxError), an undefined base class, colliding method names, and
    discarded the renamed decoder keys. Base fixed to the imported ProcessorMixin.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'Pix2StructImageProcessor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct's T5 tokenizer must not emit token_type_ids
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2_0_4_8,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images and/or text for the model.

        Raises ValueError when neither images nor text is given. Text-only input
        (non-VQA) is tokenized directly; otherwise images are processed and any
        tokenized text is merged in under decoder_* keys.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # rename tokenizer outputs to the decoder's expected key names
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order preserved."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 640
| 1
|
"""simple docstring"""
def solution(n: int = 4_00_00_00) -> int:
    """Sum the even Fibonacci numbers that do not exceed `n` (Project Euler #2).

    Renamed from `_SCREAMING_SNAKE_CASE`: the `__main__` block calls
    `solution()`. Fixes the undefined names `n`, `i`, `total` and the
    obfuscated reference that should have been `fib`.
    """
    fib = [0, 1]
    i = 0
    # grow the sequence until the next value would exceed n
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    # the last appended value exceeds n, so stop one short of it
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
    # Print the result for the default 4,000,000 limit using debug f-string syntax.
    print(F"{solution() = }")
| 567
|
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_snake_case = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How thoroughly downloaded data should be verified.

    The three members all shared one obfuscated name, which makes Enum creation
    raise (member names cannot be reused); restored to the canonical names
    matching their string values.
    """

    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures.

    The four classes below all shared one obfuscated name with an undefined
    base; names restored from the raise sites in verify_checksums, and the
    base fixed to Exception so they are raisable.
    """


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum did not match the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded checksums against the expected ones.

    Fixes: every parameter shared one obfuscated name (a SyntaxError) and the
    body referenced names that were never bound. Raises
    ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile on key mismatches and
    NonMatchingChecksumError when any checksum differs; no-op when
    expected_checksums is None.
    """
    if expected_checksums is None:
        logger.info("""Unable to verify checksums.""")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = """ for """ + verification_name if verification_name is not None else """"""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            """Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"""
        )
    logger.info("""All the checksums matched successfully""" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures.

    Names restored from the raise sites in verify_splits; base fixed to
    Exception so the classes are raisable.
    """


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not among the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's example count did not match the expected one."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded split sizes against the expected ones.

    Fixes: the two parameters shared one obfuscated name (a SyntaxError) and the
    body referenced names that were never bound. Raises ExpectedMoreSplits /
    UnexpectedSplits on key mismatches and NonMatchingSplitsSizesError when any
    split's num_examples differs; no-op when expected_splits is None.
    """
    if expected_splits is None:
        logger.info("""Unable to verify splits sizes.""")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("""All the splits matched successfully.""")
def get_size_checksum_dict(path, record_checksum=True):
    """Return {"num_bytes": ..., "checksum": ...} for the file at `path`.

    Fixes: duplicate parameter names (a SyntaxError), the nonexistent `shaaaa`
    hash (now sha256, imported locally because the file-level import is also
    corrupted), and the hash update that fed the wrong variable instead of the
    file chunk. checksum is None when record_checksum is False.
    """
    from hashlib import sha256

    if record_checksum:
        m = sha256()
        with open(path, """rb""") as f:
            # read in 1 MiB chunks to bound memory use
            for chunk in iter(lambda: f.read(1 << 2_0), B""""""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """Return True when `dataset_size` fits under config.IN_MEMORY_MAX_SIZE.

    Fixes the mismatch between the obfuscated parameter name and the
    `dataset_size` name the body actually uses. Falsy sizes or an unset
    IN_MEMORY_MAX_SIZE yield False.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 389
| 0
|
"""Doc-notebook conversion constants (Italian locale)."""
# All three constants were assigned to the same obfuscated name, so the
# reference to INSTALL_CONTENT on the next line was undefined; restored to
# distinct canonical names. String contents are unchanged.
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 718
|
"""Deprecated shim: re-exports StableDiffusionImg2ImgPipeline and warns on import."""
import warnings

# Fixed import: `StableDiffusionImgaImgPipeline` does not exist in diffusers;
# the class is StableDiffusionImg2ImgPipeline (the warning text already says so).
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 659
| 0
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True when the decimal representation of `num` is a palindrome.

    Renamed from `lowerCAmelCase_`: the solution below calls `is_palindrome`,
    and the obfuscated name was shadowed by the next two definitions.
    """
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    """Return `num` plus its decimal-digit reversal (one Lychrel iteration).

    Renamed from `lowerCAmelCase_`: the solution below calls `sum_reverse`.
    """
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 1_0000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler #55).

    A number is counted when 50 reverse-and-add iterations never produce a
    palindrome. Fixes the obfuscated locals: the iterate must feed back into
    `sum_reverse` (the broken version passed the function's own parameter) and
    the original `num` — not the iterate — is recorded on failure.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        candidate = num
        while iterations < 50:
            candidate = sum_reverse(candidate)
            iterations += 1
            if is_palindrome(candidate):
                break
        else:
            # the while/else branch runs only when no palindrome was found
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Print the count of Lychrel candidates below the default 10000 limit.
    print(f'{solution() = }')
| 674
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-module import structure. The obfuscated version rebound one name for
# every assignment, so the per-backend model lists were never added to the
# structure and the final _LazyModule was discarded instead of being installed
# in sys.modules; restored to the canonical HF lazy-init pattern.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # replace this module with a lazy proxy so heavy backends load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 284
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example consumed by @replace_example_docstring on the pipeline's
# __call__. The original literal was split by extraction garbage mid-string
# (a SyntaxError); rejoined here with the two visible halves concatenated.
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    expressed in units of scale_factor.

    Renamed from `a_`: the pipeline below calls `downscale_height_and_width`.
    Fixes the three duplicate parameter names (a SyntaxError).
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Kandinsky 2.2 decoder pipeline conditioned on image embeddings and a ControlNet hint.

    Reconstructed from the obfuscated `class a_(a)`: the base name `a` was
    undefined (fixed to the imported DiffusionPipeline), every method had
    duplicate parameter names (a SyntaxError), and several tuple unpacks were
    collapsed to single assignments. Class name taken from the usage example
    docstring above.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # ratio between pixel space and movq latent space resolution
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Create initial noise latents (or validate user-provided ones) and scale by init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload unet and movq to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(F"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models with hooks so they return to CPU after each forward."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(F"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run denoising conditioned on image embeddings and the ControlNet hint."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            # unconditional batch first, then conditional, matching the chunk(2) split below
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 721
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> torch.Tensor:
    """Preprocess a PIL image into a [-1, 1] NCHW float tensor.

    The width and height are snapped DOWN to the nearest multiple of 32
    (the UNet/VQ models require /32-divisible spatial sizes), the image is
    resized with Lanczos resampling, converted to a (1, 3, H, W) float32
    tensor in [0, 1], and finally mapped to [-1, 1].

    Fixes the obfuscated original, whose body read the unbound names
    `image`, `w` and `h` and called the nonexistent `np.floataa`.
    """
    image = __magic_name__
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )  # HWC -> NCHW with batch dim
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): local bindings in this file are machine-mangled (every
    assignment targets `snake_case`), so several expressions below read
    names that are never bound (`image`, `batch_size`, `latents`,
    `accepts_eta`, `eta`, `output_type`, `return_dict`, ...). Compare with
    the upstream `LDMSuperResolutionPipeline` before relying on this code.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and scheduler modules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )

    @torch.no_grad()
    def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run super-resolution: draw noise latents, denoise them conditioned
        on the low-resolution image, then decode with the VQ-VAE.

        Returns an ``ImagePipelineOutput`` (or a 1-tuple when
        ``return_dict`` is falsy).
        """
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[int] = 1
        elif isinstance(UpperCAmelCase__ , torch.Tensor ):
            snake_case : Any = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
        snake_case , snake_case : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
        snake_case : str = next(self.unet.parameters() ).dtype
        snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
        snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
        snake_case : Optional[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : Dict = eta
        for t in self.progress_bar(UpperCAmelCase__ ):
            # concat latents and low resolution image in the channel dimension.
            snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
            snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            # predict the noise residual
            snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
        # decode the image latents with the VQVAE
        snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
        snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
        # map decoded output from [-1, 1] back to [0, 1] for image conversion
        snake_case : Dict = image / 2 + 0.5
        snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (_snake_case ):
    """Test-suite for ``DEISMultistepScheduler`` (save/load round-trips, full
    denoising loops, config sweeps, fp16).

    NOTE(review): assignment targets throughout this file are machine-mangled
    (every binding is ``UpperCAmelCase_``), so the bodies read many names that
    are never bound (``config``, ``kwargs``, ``sample``, ``scheduler``,
    ``output`` ...), and two methods repeat a parameter name, which is a
    SyntaxError. Compare each method with the upstream diffusers
    ``DEISMultistepSchedulerTest`` before relying on any individual test.
    """

    _snake_case : List[str] = (DEISMultistepScheduler,)
    _snake_case : Any = (('''num_inference_steps''', 2_5),)

    def __UpperCAmelCase ( self , **_UpperCamelCase ) -> List[Any]:
        """Default scheduler config, with keyword overrides applied."""
        UpperCAmelCase_ : Optional[Any] = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**_UpperCamelCase )
        return config

    def __UpperCAmelCase ( self , _UpperCamelCase=0 , **_UpperCamelCase ) -> str:
        """A save_config/from_pretrained round-trip must reproduce step outputs."""
        UpperCAmelCase_ : int = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : int = kwargs.pop('num_inference_steps' , _UpperCamelCase )
        UpperCAmelCase_ : int = self.dummy_sample
        UpperCAmelCase_ : Optional[int] = 0.1 * sample
        UpperCAmelCase_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : str = self.get_scheduler_config(**_UpperCamelCase )
            UpperCAmelCase_ : List[str] = scheduler_class(**_UpperCamelCase )
            scheduler.set_timesteps(_UpperCamelCase )
            # copy over dummy past residuals
            UpperCAmelCase_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_UpperCamelCase )
                UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_UpperCamelCase )
                new_scheduler.set_timesteps(_UpperCamelCase )
                # copy over dummy past residuals
                UpperCAmelCase_ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
            UpperCAmelCase_ , UpperCAmelCase_ : Dict = sample, sample
            for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
                UpperCAmelCase_ : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
                UpperCAmelCase_ : Dict = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        """Intentionally a no-op: the base-class check does not apply here."""
        pass

    def __UpperCAmelCase ( self , _UpperCamelCase=0 , **_UpperCamelCase ) -> Any:
        """Round-trip save/load with residuals copied after set_timesteps."""
        UpperCAmelCase_ : Union[str, Any] = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : List[Any] = kwargs.pop('num_inference_steps' , _UpperCamelCase )
        UpperCAmelCase_ : List[str] = self.dummy_sample
        UpperCAmelCase_ : Tuple = 0.1 * sample
        UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : int = self.get_scheduler_config()
            UpperCAmelCase_ : str = scheduler_class(**_UpperCamelCase )
            scheduler.set_timesteps(_UpperCamelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase_ : str = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_UpperCamelCase )
                UpperCAmelCase_ : Tuple = scheduler_class.from_pretrained(_UpperCamelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(_UpperCamelCase )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase_ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
            UpperCAmelCase_ : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            UpperCAmelCase_ : int = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __UpperCAmelCase ( self , _UpperCamelCase=None , **_UpperCamelCase ) -> int:
        """Run a full 10-step denoising loop and return the final sample.

        NOTE(review): the two parameters share one mangled name — SyntaxError.
        """
        if scheduler is None:
            UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
            UpperCAmelCase_ : int = self.get_scheduler_config(**_UpperCamelCase )
            UpperCAmelCase_ : Tuple = scheduler_class(**_UpperCamelCase )
        UpperCAmelCase_ : int = self.scheduler_classes[0]
        UpperCAmelCase_ : str = self.get_scheduler_config(**_UpperCamelCase )
        UpperCAmelCase_ : int = scheduler_class(**_UpperCamelCase )
        UpperCAmelCase_ : List[Any] = 1_0
        UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
        UpperCAmelCase_ : List[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(_UpperCamelCase )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase )
            UpperCAmelCase_ : Dict = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
        return sample

    def __UpperCAmelCase ( self ) -> str:
        """step() output must preserve the sample's shape."""
        UpperCAmelCase_ : Any = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : Dict = kwargs.pop('num_inference_steps' , _UpperCamelCase )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : Optional[int] = self.get_scheduler_config()
            UpperCAmelCase_ : Dict = scheduler_class(**_UpperCamelCase )
            UpperCAmelCase_ : Tuple = self.dummy_sample
            UpperCAmelCase_ : Union[str, Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(_UpperCamelCase , 'set_timesteps' ):
                scheduler.set_timesteps(_UpperCamelCase )
            elif num_inference_steps is not None and not hasattr(_UpperCamelCase , 'set_timesteps' ):
                UpperCAmelCase_ : Dict = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
            UpperCAmelCase_ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
            UpperCAmelCase_ : List[str] = scheduler.timesteps[5]
            UpperCAmelCase_ : List[Any] = scheduler.timesteps[6]
            UpperCAmelCase_ : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            UpperCAmelCase_ : Union[str, Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __UpperCAmelCase ( self ) -> int:
        """Switching scheduler classes via from_config must give identical loops."""
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        UpperCAmelCase_ : int = DEISMultistepScheduler(**self.get_scheduler_config() )
        UpperCAmelCase_ : Union[str, Any] = self.full_loop(scheduler=_UpperCamelCase )
        UpperCAmelCase_ : str = torch.mean(torch.abs(_UpperCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
        UpperCAmelCase_ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        UpperCAmelCase_ : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase_ : Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase_ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase_ : Tuple = self.full_loop(scheduler=_UpperCamelCase )
        UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def __UpperCAmelCase ( self ) -> List[Any]:
        """Sweep num_train_timesteps values."""
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=_UpperCamelCase )

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        """Sweep thresholding configurations."""
        self.check_over_configs(thresholding=_UpperCamelCase )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , algorithm_type='deis' , solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , )

    def __UpperCAmelCase ( self ) -> Optional[Any]:
        """Sweep prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_UpperCamelCase )

    def __UpperCAmelCase ( self ) -> Dict:
        """Sweep algorithm/solver/order/prediction combos and run full loops."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , algorithm_type=_UpperCamelCase , )
                        UpperCAmelCase_ : List[str] = self.full_loop(
                            solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , algorithm_type=_UpperCamelCase , )
                        assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"

    def __UpperCAmelCase ( self ) -> Optional[Any]:
        """Sweep the lower_order_final flag."""
        self.check_over_configs(lower_order_final=_UpperCamelCase )
        self.check_over_configs(lower_order_final=_UpperCamelCase )

    def __UpperCAmelCase ( self ) -> Dict:
        """Sweep inference step counts."""
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=_UpperCamelCase , time_step=0 )

    def __UpperCAmelCase ( self ) -> List[str]:
        """Full loop with default (epsilon) prediction; check mean magnitude."""
        UpperCAmelCase_ : Union[str, Any] = self.full_loop()
        UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def __UpperCAmelCase ( self ) -> Dict:
        """Full loop with v-prediction; check mean magnitude."""
        UpperCAmelCase_ : Optional[int] = self.full_loop(prediction_type='v_prediction' )
        UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_UpperCamelCase ) )
        assert abs(result_mean.item() - 0.0_91 ) < 1E-3

    def __UpperCAmelCase ( self ) -> Any:
        """Full loop in float16; the output sample must stay fp16."""
        UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
        UpperCAmelCase_ : int = self.get_scheduler_config(thresholding=_UpperCamelCase , dynamic_thresholding_ratio=0 )
        UpperCAmelCase_ : Optional[int] = scheduler_class(**_UpperCamelCase )
        UpperCAmelCase_ : Optional[Any] = 1_0
        UpperCAmelCase_ : List[str] = self.dummy_model()
        UpperCAmelCase_ : Optional[Any] = self.dummy_sample_deter.half()
        scheduler.set_timesteps(_UpperCamelCase )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase_ : Tuple = model(_UpperCamelCase , _UpperCamelCase )
            UpperCAmelCase_ : Dict = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
        assert sample.dtype == torch.floataa
| 406
|
from __future__ import annotations
def lowercase__ ( __snake_case : list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements ("house robber" DP).

    Tracks two running values: the best sum that INCLUDES the current
    element and the best sum that EXCLUDES it. Empty input yields 0.

    Fixes the obfuscated original, which read the unbound name
    `max_excluding` and applied ``max`` to the input list itself.
    """
    if not __snake_case:
        return 0
    max_including = __snake_case[0]
    max_excluding = 0
    for num in __snake_case[1:]:
        # a sum including `num` must skip the previous element, so it extends
        # the previous "excluding" sum; "excluding" keeps the better of both.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 1
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# NOTE(review): the package list is bound to `UpperCAmelCase` but consumed
# below as `pkgs_to_check_at_runtime` — names appear machine-mangled
# (upstream binds `pkgs_to_check_at_runtime` directly); verify before use.
UpperCAmelCase : int = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('importlib_metadata')
# Verify at import time that each runtime-critical dependency satisfies its pin.
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available
            if not is_tokenizers_available():
                continue # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _a ( pkg , hint=None ):
    """Check that the installed version of *pkg* satisfies its pinned requirement.

    Looks the pin up in the ``deps`` table and delegates to ``require_version``;
    *hint* is appended to the error message on mismatch.

    Fixes the obfuscated original, whose signature repeated one parameter name
    (a SyntaxError) and whose body read the unbound name ``pkg``.
    """
    require_version(deps[pkg] , hint )
| 47
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (a__ ):
    """Configuration for a SEW-D model (``model_type="sew-d"``).

    Groups the feature-extractor (conv stack), DeBERTa-style transformer
    encoder, SpecAugment, CTC-loss and sequence-classification
    hyper-parameters.

    NOTE(review): the obfuscated original repeated a single parameter name
    for every argument (a SyntaxError) and discarded each value into a
    local; the canonical upstream ``SEWDConfig`` parameter names and the
    ``self.*`` assignments are restored here — defaults match the original
    positionally.
    """

    _lowercase : List[str] = """sew-d"""

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Dict:
        """Build the config; see the class docstring for parameter groups."""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        # the three conv specs must describe the same number of layers
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def UpperCAmelCase_ ( self ) -> Any:
        """Overall downsampling factor of the conv feature extractor
        (product of the conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 47
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a ( unittest.TestCase ):
    """Tests for ``InstructBlipProcessor`` (GPT-2 tokenizer + BLIP image
    processor + BERT Q-Former tokenizer).

    NOTE(review): all methods share the mangled name ``UpperCAmelCase`` (later
    defs shadow earlier ones in the class namespace) and every binding targets
    ``_A``, so bodies read names that are never bound (``processor``,
    ``tokenizer``, ``image_processor`` ...). Compare with the upstream
    transformers ``InstructBlipProcessorTest`` before relying on it.
    """

    def UpperCAmelCase ( self ) -> Optional[Any]:
        """setUp: build a processor from tiny checkpoints, save to a temp dir."""
        _A = tempfile.mkdtemp()
        _A = BlipImageProcessor()
        _A = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        _A = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        _A = InstructBlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        processor.save_pretrained(self.tmpdirname )

    def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
        """Reload the saved processor and return its tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer

    def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
        """Reload the saved processor and return its image processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor

    def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
        """Reload the saved processor and return its Q-Former tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).qformer_tokenizer

    def UpperCAmelCase ( self ) -> str:
        """tearDown: delete the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """Create a single random RGB PIL image as test input."""
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCAmelCase ( self ) -> List[Any]:
        """Round-trip save/load with extra kwargs keeps components equal."""
        _A = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        _A = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
        _A = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
        self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase_ )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Processor image output must match the bare image processor."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = self.get_qformer_tokenizer()
        _A = InstructBlipProcessor(
            tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
        _A = self.prepare_image_inputs()
        _A = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
        _A = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def UpperCAmelCase ( self ) -> List[str]:
        """Processor text output must match both tokenizers."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = self.get_qformer_tokenizer()
        _A = InstructBlipProcessor(
            tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
        _A = """lower newer"""
        _A = processor(text=lowerCAmelCase_ )
        _A = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
        _A = qformer_tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )

    def UpperCAmelCase ( self ) -> Any:
        """Joint text+image call emits the expected keys; empty call raises."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = self.get_qformer_tokenizer()
        _A = InstructBlipProcessor(
            tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
        _A = """lower newer"""
        _A = self.prepare_image_inputs()
        _A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase_ ):
            processor()

    def UpperCAmelCase ( self ) -> Dict:
        """batch_decode must delegate to the main tokenizer."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = self.get_qformer_tokenizer()
        _A = InstructBlipProcessor(
            tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
        _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _A = processor.batch_decode(lowerCAmelCase_ )
        _A = tokenizer.batch_decode(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def UpperCAmelCase ( self ) -> Dict:
        """Check the model-input key names emitted by the processor."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = self.get_qformer_tokenizer()
        _A = InstructBlipProcessor(
            tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
        _A = """lower newer"""
        _A = self.prepare_image_inputs()
        _A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 401
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
#
# NOTE(review): every result below is bound to the same mangled name
# `_SCREAMING_SNAKE_CASE`, while later lines read `parser`, `args`,
# `tokenizer`, `config_kwargs`, `config` and `model` — none of which are
# bound. Upstream binds each result to the matching name; verify before use.
_SCREAMING_SNAKE_CASE = HfArgumentParser(InitializationArguments)
_SCREAMING_SNAKE_CASE = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_SCREAMING_SNAKE_CASE = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 401
| 1
|
'''simple docstring'''
from PIL import Image
def __a(SCREAMING_SNAKE_CASE_ : Image ):
    """Binarize a grayscale PIL image around its mean intensity (in place).

    Every pixel strictly brighter than the integer mean becomes 255, all
    others 0. Returns the same (mutated) image object.

    Fixes the obfuscated original, which iterated ``range(image)`` and read
    the unbound names ``width``/``height``/``mean``/``pixels``.

    NOTE(review): the axis naming mirrors the upstream algorithm, which
    unpacks ``image.size`` as ``height, width`` (PIL returns (width,
    height)) — the two passes are self-consistent, but confirm on
    non-square images.
    """
    image = SCREAMING_SNAKE_CASE_
    height, width = image.size
    mean = 0
    pixels = image.load()
    # first pass: accumulate the total intensity, then take the integer mean
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # second pass: threshold every pixel against the mean
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Demo: binarize an image around its mean intensity and save it.
    # NOTE(review): `mean_threshold` is not defined in this file (the
    # function above is mangled to `__a`), and the result is bound to
    # `_SCREAMING_SNAKE_CASE` while `image.save` reads an unbound name.
    _SCREAMING_SNAKE_CASE = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 489
|
'''simple docstring'''
from __future__ import annotations
def __a(nums_a : list[float] , nums_b : list[float] ):
    """Return the median of the two arrays merged together.

    Sorts the concatenation; for an odd total length returns the middle
    element, otherwise the average of the two middle elements.

    Fixes the obfuscated original, whose signature repeated one parameter
    name (a SyntaxError) and whose body read the unbound names
    ``numsa``/``all_numbers``.
    """
    all_numbers = sorted(nums_a + nums_b )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): both inputs are bound to the same mangled name
    # `_SCREAMING_SNAKE_CASE` (the second clobbers the first), and the
    # print reads `median_of_two_arrays`/`array_a`, neither of which is
    # bound in this file (the function above is mangled to `__a`).
    _SCREAMING_SNAKE_CASE = [float(x) for x in input("Enter the elements of first array: ").split()]
    _SCREAMING_SNAKE_CASE = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 489
| 1
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_a : Optional[int] = re.compile(R"\b(a|an|the)\b", re.UNICODE)  # article-stripping regex (upstream name: ARTICLES_REGEX)
_a : Tuple = None  # NOTE(review): rebinds `_a`, clobbering the regex above — upstream bound OPTS here; verify
def _lowercase ( ) -> argparse.Namespace:
    """Parse CLI arguments for the official SQuAD v2.0 evaluation script.

    Prints the help text and exits with status 1 when invoked with no
    arguments. Fixes the obfuscated original, which bound the parser to a
    throwaway name and passed the unbound name ``lowerCamelCase__`` as the
    ``type``/``default`` of two options (upstream: ``float`` / ``None``).
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def _lowercase ( lowerCamelCase__ ) -> Tuple:
    """Map every question id in *dataset* to whether it has a gold answer.

    *dataset* is the SQuAD-style ``data`` list: articles containing
    paragraphs containing qas entries. A question "has an answer" when its
    ``answers.text`` list is non-empty.

    Fixes the obfuscated original, which discarded each entry into a
    throwaway local and returned the unbound name ``qid_to_has_ans``.
    """
    qid_to_has_ans = {}
    for article in lowerCamelCase__:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def _lowercase ( lowerCamelCase__ ) -> str:
    """Normalize an answer string for SQuAD scoring.

    Lower-cases the text, strips punctuation, removes the English articles
    a/an/the, and collapses all runs of whitespace to single spaces.

    Fixes the obfuscated original, whose helpers read the unbound names
    ``text`` and ``exclude`` and whose article step referenced the unbound
    ``ARTICLES_REGEX`` (the module-level binding was mangled away) — the
    regex is therefore compiled locally here so the function stands alone.
    """
    articles_regex = re.compile(r"\b(a|an|the)\b" , re.UNICODE )

    def remove_articles(text ):
        return articles_regex.sub(" " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def _lowercase ( lowerCamelCase__ ) -> Any:
    """Whitespace-tokenize the input after SQuAD normalization; [] for empty.

    NOTE(review): the body reads ``s`` and calls ``normalize_answer`` —
    neither is bound in this file (names are machine-mangled); verify
    against the upstream SQuAD v2 evaluation script.
    """
    if not s:
        return []
    return normalize_answer(lowerCamelCase__ ).split()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
    """Exact-match score: 1 when the normalized gold and prediction strings
    are equal, else 0.

    NOTE(review): ``normalize_answer`` is not bound in this file (names are
    machine-mangled), and the two parameters share one name (SyntaxError);
    verify against the upstream SQuAD v2 evaluation script.
    """
    return int(normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """Token-level F1 between a gold answer and a prediction.

    NOTE(review): parameters share one mangled name (SyntaxError) and the
    body reads ``common``, ``num_same``, ``gold_toks``, ``pred_toks``,
    ``precision``, ``recall`` and ``fa`` — none of which are bound because
    assignment targets are machine-mangled; verify against the upstream
    SQuAD v2 evaluation script.
    """
    __UpperCAmelCase : Tuple = get_tokens(lowerCamelCase__ )
    __UpperCAmelCase : Any = get_tokens(lowerCamelCase__ )
    __UpperCAmelCase : Any = collections.Counter(lowerCamelCase__ ) & collections.Counter(lowerCamelCase__ )
    __UpperCAmelCase : Tuple = sum(common.values() )
    if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    __UpperCAmelCase : int = 1.0 * num_same / len(lowerCamelCase__ )
    __UpperCAmelCase : Any = 1.0 * num_same / len(lowerCamelCase__ )
    __UpperCAmelCase : Dict = (2 * precision * recall) / (precision + recall)
    return fa
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
    """Compute raw exact-match and F1 scores for every question in the dataset.

    NOTE(review): parameters share one mangled name (SyntaxError); the two
    score dicts are discarded into throwaway locals, and the body reads
    ``dataset``, ``preds``, ``qid``, ``gold_answers``, ``a_gold``,
    ``a_pred``, ``exact_scores`` and ``fa_scores`` — none bound here.
    Verify against the upstream SQuAD v2 evaluation script.
    """
    __UpperCAmelCase : int = {}
    __UpperCAmelCase : List[str] = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                __UpperCAmelCase : Optional[int] = qa["id"]
                __UpperCAmelCase : str = [t for t in qa["answers"]["text"] if normalize_answer(lowerCamelCase__ )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    __UpperCAmelCase : str = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""" )
                    continue
                __UpperCAmelCase : Any = preds[qid]
                # Take max over all gold answers
                __UpperCAmelCase : Tuple = max(compute_exact(lowerCamelCase__ , lowerCamelCase__ ) for a in gold_answers )
                __UpperCAmelCase : Tuple = max(compute_fa(lowerCamelCase__ , lowerCamelCase__ ) for a in gold_answers )
    return exact_scores, fa_scores
def _lowercase(scores, na_probs, qid_to_has_ans, na_prob_thresh) -> dict:
    """Apply the no-answer threshold to raw scores.

    When a question's no-answer probability exceeds the threshold, its score
    becomes 1.0 if the question truly has no answer, else 0.0; otherwise the
    raw score is kept. NOTE: original declared four parameters under one name
    (a SyntaxError); names restored from the reference SQuAD 2.0 script.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def _lowercase(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into an OrderedDict of percentages.

    With `qid_list` given, averages only over those ids; otherwise over all.
    NOTE: original declared the parameters under one repeated name (SyntaxError).
    Raises ZeroDivisionError on an empty score set — same as the reference script.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def _lowercase(main_eval, new_eval, prefix) -> None:
    """Copy every entry of `new_eval` into `main_eval` under a `prefix_` key.

    NOTE: original declared three parameters under one name (SyntaxError) and
    lost the assignment target; restored from the reference SQuAD 2.0 script.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def _lowercase(precisions, recalls, out_image, title) -> None:
    """Render a precision-recall step curve and save it to `out_image`.

    Relies on the module-level `plt` imported under the __main__ guard.
    NOTE: original declared four parameters under one name (SyntaxError).
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def _lowercase(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer threshold and compute average precision.

    Questions are visited in order of increasing no-answer probability; each
    distinct probability value is a candidate threshold. Returns {"ap": ...}
    as a percentage. NOTE: original declared all parameters under one repeated
    name (SyntaxError) and its sort lambda referenced an unbound `k`.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def _lowercase(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir) -> None:
    """Run PR-curve analysis for exact, F1 and oracle scores; merge results into `main_eval`.

    Writes three PNG curves into `out_image_dir` (created if missing).
    NOTE: original declared six parameters under one name (SyntaxError) and its
    oracle dict comprehension converted the wrong variable.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        # No answerable questions: PR curves are undefined, skip quietly.
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    # Oracle: score 1.0 exactly when the question is answerable.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def _lowercase(na_probs, qid_list, image_dir, name) -> None:
    """Save a normalized histogram of no-answer probabilities for `qid_list`.

    No-op for an empty id list. Relies on module-level `np` and `plt`.
    NOTE: original declared four parameters under one name (SyntaxError).
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Equal weights so the bars sum to 1 (a proportion, not a count).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png"""))
    plt.clf()
def _lowercase(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the overall score.

    Starts from the score of predicting no-answer everywhere, then walks
    questions in order of increasing no-answer probability, flipping each to
    "answered". Returns (best_score_percentage, best_threshold).
    NOTE: original declared four parameters under one name (SyntaxError) and
    its sort lambda referenced an unbound `k`.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question loses a point iff we predicted text.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def _lowercase(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans) -> None:
    """Record the best achievable exact/F1 scores and their thresholds in `main_eval`.

    NOTE: original declared six parameters under one name (SyntaxError) and lost
    both the tuple unpacking and the destination keys; restored from the
    reference SQuAD 2.0 evaluation script.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def _lowercase() -> None:
    """Entry point: load data/predictions, evaluate, and print or dump the results.

    Reads configuration from the module-level OPTS set by `parse_args()` under
    the __main__ guard. NOTE: the original body lost every variable binding
    (all assigned to one mangled name) and the tuple unpacking; the data flow
    is restored from the reference SQuAD 2.0 evaluation script.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer probability file, never predict no-answer.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


# Backward-compatible alias: the __main__ guard below invokes `main()`.
main = _lowercase
if __name__ == "__main__":
    # Parse CLI options before evaluating.
    # NOTE(review): `parse_args` is not defined in this file — presumably it also
    # sets the module-level OPTS used throughout; confirm against the full file.
    _a : int = parse_args()
    if OPTS.out_image_dir:
        # Use the non-interactive Agg backend so figures can be written headlessly.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    # NOTE(review): `main` is not defined under this name above (the entry point
    # is the mangled `_lowercase`) — confirm the intended binding.
    main()
| 168
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
# Metric-card strings for the Matthews-correlation metric.
# NOTE: the original bound all three strings to the single mangled name
# `lowercase_`, while the decorator and MetricInfo below reference
# `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` (a NameError);
# the intended names are restored. String contents are unchanged.
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    # `datasets.Metric` wrapper around sklearn's matthews_corrcoef.
    # NOTE(review): `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION` are
    # referenced here, but the module-level constants above are bound to a
    # different (mangled) name — confirm the bindings.
    # NOTE(review): both methods below share the name `__SCREAMING_SNAKE_CASE`,
    # so the second definition shadows the first (the MetricInfo builder becomes
    # unreachable) — presumably these were `_info` and `_compute` upstream.
    def __SCREAMING_SNAKE_CASE ( self ):
        """Build the MetricInfo (description, citation, features, references)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ) , reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ] , )

    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__=None ):
        """Compute the Matthews correlation for predictions vs. references (optionally weighted)."""
        return {
            "matthews_correlation": float(matthews_corrcoef(snake_case__ , snake_case__ , sample_weight=snake_case__ ) ),
        }
| 572
| 0
|
import math
class Graph:
    """All-pairs shortest paths on a dense weighted digraph via Floyd-Warshall.

    NOTE: the original class and method names were mangled
    (`lowerCamelCase_`/`lowercase`) and did not match the demo call sites below
    (`Graph`, `add_edge`, `floyd_warshall`, `show_min`); the `__init__` body also
    referenced the undefined name `a_`. Names restored; the unused duplicate
    weight matrix was dropped and the diagonal is initialized to zero so that
    the distance from a node to itself is 0 rather than infinity.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # dp[i][j] = best known distance from i to j; math.inf = no path yet.
        self.dp = [[math.inf for _ in range(n)] for _ in range(n)]
        for i in range(n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add (or overwrite) a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through each intermediate node k (O(n^3))."""
        for k in range(self.n):
            for i in range(self.n):
                for j in range(self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest distance u -> v computed by floyd_warshall()."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Demo: build a 5-node weighted digraph and query two shortest paths.
    # NOTE(review): the result of Graph(5) is bound to `__lowerCAmelCase` but the
    # calls below use the name `graph` — and the class above is defined under a
    # mangled name, not `Graph`; confirm the intended bindings.
    __lowerCAmelCase = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 706
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCamelCase_(BaseOutput):
    """Dataclass output of the VAE decode step.

    NOTE: the original base class was the undefined name `lowercase`;
    `BaseOutput` (imported above) is the intended base for @dataclass
    model outputs in this module.
    """

    # Decoded tensor batch — presumably named `sample` upstream; verify.
    __lowercase: torch.FloatTensor
class lowerCamelCase_(nn.Module):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> latent moments.

    NOTE: the original `__init__` repeated one parameter name for every argument
    (a SyntaxError) and referenced the nonexistent `torch.nn.Convad`; parameter
    names, data flow and `nn.Conv2d` are restored from the upstream diffusers
    `Encoder`. Argument order and defaults are unchanged.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down: one block per entry; the final block does not downsample
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out: GroupNorm -> SiLU -> conv; 2x channels when double_z (mean + logvar)
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode an image batch; uses activation checkpointing in training when enabled."""
        sample = self.conv_in(x)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # `use_reentrant=False` only exists on torch >= 1.11
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class lowerCamelCase_(nn.Module):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> image-space output.

    NOTE: the original `__init__` repeated one parameter name for every argument
    (a SyntaxError) and used the nonexistent `nn.Convad`; parameter names, data
    flow and `nn.Conv2d` are restored from the upstream diffusers `Decoder`.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # With spatial norm the latent itself conditions the normalization layers.
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up: mirror of the encoder's channel progression
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latents `z` (optionally conditioned on `latent_embeds` for spatial norm)."""
        sample = self.conv_in(z)
        # Upcast/downcast target for checkpointed path — dtype of the up-block weights.
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class lowerCamelCase_(nn.Module):
    """Discrete VQ bottleneck: snaps each spatial latent vector to its nearest codebook entry.

    NOTE: the original defined four methods all under the mangled name `lowercase`
    (each shadowing the previous) while the bodies call `self.remap_to_used` /
    `self.unmap_to_all`; method and parameter names are restored from the
    upstream diffusers `VectorQuantizer`.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta  # commitment-loss weight
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            # Optional index remapping loaded from an .npy file of used codes.
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices."""
            )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices onto the restricted `used` index set."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1  # indices not present in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: restricted indices back to full codebook indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        """Quantize z (B, C, H, W); returns (z_q, loss, (perplexity, min_encodings, indices))."""
        # (B, C, H, W) -> (B, H, W, C) and flatten per spatial position
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up quantized vectors for `indices`, optionally reshaped to `shape` (B, H, W, C)."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class lowerCamelCase_(object):
    """Diagonal Gaussian over latents, parameterized by concatenated (mean, logvar) channels.

    NOTE: the original base class was the undefined name `lowercase` (upstream
    this class derives from `object`) and all four methods were mangled to one
    shadowed name; method names restored from the upstream diffusers
    `DiagonalGaussianDistribution`.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # Channel dim holds mean and logvar stacked: split them apart.
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)  # keep exp() numerically safe
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Zero variance: sampling collapses to the mean.
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        """Draw a reparameterized sample: mean + std * eps."""
        eps = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * eps
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or to another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=(1, 2, 3)):
        """Negative log-likelihood of `sample` under this Gaussian, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """The distribution mode — for a Gaussian, the mean."""
        return self.mean
| 589
| 0
|
from __future__ import annotations
def UpperCamelCase(a_list: list[int], item: int) -> bool:
    """Recursive binary search: return True iff `item` occurs in sorted `a_list`.

    NOTE: the original body recursed via the undefined name `binary_search`;
    recursion now targets this function, and a module-level `binary_search`
    alias is added below so the __main__ demo keeps working.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return UpperCamelCase(a_list[:midpoint], item)
    return UpperCamelCase(a_list[midpoint + 1 :], item)


# Backward-compatible alias used by the demo under the __main__ guard.
binary_search = UpperCamelCase
if __name__ == "__main__":
    # Interactive demo: read a sorted comma-separated list and a target from stdin.
    # NOTE(review): the values are bound to the single mangled name
    # `_UpperCAmelCase`, yet the later lines read `user_input`, `sequence`,
    # `target` and `not_str` — confirm the intended variable names.
    _UpperCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
    _UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
    _UpperCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
    _UpperCAmelCase = """""" if binary_search(sequence, target) else """not """
    print(F"""{target} was {not_str}found in {sequence}""")
| 558
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module-level logger and docstring constants for the TF RegNet model file.
# NOTE(review): every constant below is bound to the same mangled name
# `_UpperCAmelCase`, so each assignment overwrites the previous one — upstream
# these are distinct names (logger, _CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, ...);
# confirm the intended bindings.
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv -> BatchNorm -> activation block with explicit 'same'-style zero padding.

    NOTE: the original `__init__` declared every parameter under one name (a
    SyntaxError), used the nonexistent `tf.keras.layers.ZeroPaddingaD`/`ConvaD`,
    and the mangled class name did not match its uses further down the file;
    names restored from the upstream transformers TF RegNet implementation.
    """

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # ACTaFN is this file's (mangled) import of the activation registry.
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates the channel count and applies the first stride-2 conv embedding."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch norm used to project/downsample residual branch inputs."""

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        # `training` is forwarded so batch-norm statistics update only during training.
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation: global pool -> bottleneck convs -> channel-wise gating."""

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # Squeeze to (B, 1, 1, C), excite through the two convs, then gate the input.
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X layer: ResNeXt-style bottleneck (1x1 -> grouped 3x3 -> 1x1) with residual."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        # Project the shortcut only when the shape changes; otherwise identity.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Final 1x1 conv is linear; the block activation is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
    '''RegNet "Y" residual block: identical to the "X" block but with a
    Squeeze-and-Excitation layer inserted before the final 1x1 convolution.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python) — treat parameter names as placeholders.
    '''
    def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
        """Build the shortcut and the conv + SE residual branch."""
        super().__init__(**lowercase )
        # A projection shortcut is only needed when the output shape differs.
        A_ : Any = in_channels != out_channels or stride != 1
        A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
        A_ : str = (
            TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        # SE bottleneck is in_channels // 4, per the RegNet recipe.
        A_ : Optional[int] = [
            TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
            TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
            TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
        ]
        A_ : str = ACTaFN[config.hidden_act]
    def lowerCAmelCase_ ( self , lowercase ):
        """Apply the residual branch, add the shortcut output, then activate."""
        A_ : Any = hidden_state
        for layer_module in self.layers:
            A_ : Optional[Any] = layer_module(lowercase )
        A_ : Optional[Any] = self.shortcut(lowercase )
        # residual connection
        hidden_state += residual
        A_ : Tuple = self.activation(lowercase )
        return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
    '''One RegNet stage: a stack of `depth` residual blocks (X or Y variant, chosen
    from `config.layer_type`), where only the first block downsamples via stride.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python).
    '''
    def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
        """Select the block class and build the stack of layers."""
        super().__init__(**lowercase )
        A_ : List[Any] = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        A_ : int = [
            # downsampling is done in the first layer with stride of 2
            layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
            *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
        ]
    def lowerCAmelCase_ ( self , lowercase ):
        """Run the input sequentially through every block of the stage."""
        for layer_module in self.layers:
            A_ : List[str] = layer_module(lowercase )
        return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
    '''RegNet encoder: the full sequence of stages. Optionally collects the hidden
    state produced before/after each stage.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python).
    '''
    def __init__( self , lowercase , **lowercase ):
        """Build the list of stages from the config's hidden sizes and depths."""
        super().__init__(**lowercase )
        A_ : Any = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
        # Pair consecutive hidden sizes to get (in_channels, out_channels) per stage.
        A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
    def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
        """Run all stages; optionally accumulate hidden states, return tuple or ModelOutput."""
        # Tuple accumulator only when hidden states were requested.
        A_ : Optional[Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                A_ : Optional[Any] = hidden_states + (hidden_state,)
            A_ : List[Any] = stage_module(lowercase )
            if output_hidden_states:
                A_ : str = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
    '''RegNet main layer: embedder -> encoder -> global average pooler. Outputs are
    transposed from TF's NHWC to NCHW for uniformity with the PyTorch modules.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python).
    '''
    # config class used by @keras_serializable for (de)serialization
    lowerCamelCase_ = RegNetConfig
    def __init__( self , lowercase , **lowercase ):
        """Build embedder, encoder and pooler sub-layers."""
        super().__init__(**lowercase )
        A_ : List[Any] = config
        A_ : List[str] = TFRegNetEmbeddings(lowercase , name='embedder' )
        A_ : Dict = TFRegNetEncoder(lowercase , name='encoder' )
        A_ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
    @unpack_inputs
    def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
        """Forward pass; returns (last_hidden_state, pooled[, hidden_states]) or a ModelOutput."""
        A_ : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : int = self.embedder(lowercase , training=lowercase )
        A_ : int = self.encoder(
            lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
        A_ : List[Any] = encoder_outputs[0]
        A_ : Any = self.pooler(lowercase )
        # Change to NCHW output format have uniformity in the modules
        A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
        A_ : List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            A_ : Dict = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
    '''Abstract base class for RegNet TF models: wires the config class, model
    prefix and main input name, and declares the serving input signature.

    NOTE(review): the three class attributes below are all machine-mangled to the
    same name ``lowerCamelCase_`` — as written each assignment overwrites the
    previous one; in the original they are ``config_class``, ``base_model_prefix``
    and ``main_input_name``.
    '''
    lowerCamelCase_ = RegNetConfig
    lowerCamelCase_ = '''regnet'''
    lowerCamelCase_ = '''pixel_values'''
    @property
    def lowerCAmelCase_ ( self ):
        """Serving signature: a float tensor of pixel values (batch, channels, 224, 224)."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
    '''Bare RegNet model: thin wrapper around TFRegNetMainLayer, no task head.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python).
    '''
    def __init__( self , lowercase , *lowercase , **lowercase ):
        """Instantiate the main layer under the 'regnet' name scope."""
        super().__init__(lowercase , *lowercase , **lowercase )
        A_ : Union[str, Any] = TFRegNetMainLayer(lowercase , name='regnet' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
        """Forward pass delegating entirely to the main layer."""
        A_ : Union[str, Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A_ : str = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : int = self.regnet(
            pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , __A , )
class UpperCAmelCase ( __A , __A ):
    '''RegNet with an image-classification head: backbone + flatten + dense layer
    over the pooled features.

    NOTE(review): ``lowercase`` is repeated as a parameter name (machine-mangled;
    SyntaxError in real Python).
    '''
    def __init__( self , lowercase , *lowercase , **lowercase ):
        """Build the backbone and the (flatten, dense) classification head."""
        super().__init__(lowercase , *lowercase , **lowercase )
        A_ : str = config.num_labels
        A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
        # classification head
        A_ : int = [
            tf.keras.layers.Flatten(),
            # identity when num_labels == 0 (feature-extraction mode)
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
        """Forward pass; computes logits and, when labels are given, the loss."""
        A_ : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : Dict = self.regnet(
            lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
        A_ : Any = outputs.pooler_output if return_dict else outputs[1]
        # Apply the head: flatten then dense (or identity).
        A_ : Union[str, Any] = self.classifier[0](lowercase )
        A_ : Dict = self.classifier[1](lowercase )
        A_ : Dict = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
        if not return_dict:
            A_ : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
| 558
| 1
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __magic_name__:
    '''Helper that builds a small TrOCR decoder config and random inputs for the
    standalone-decoder tests below.

    NOTE(review): ``__UpperCamelCase`` is repeated as a parameter name in
    ``__init__`` (machine-mangled; SyntaxError in real Python) — the defaults
    correspond to vocab_size=99, batch_size=13, seq_length=16, etc.
    '''
    def __init__( self : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple=9_9 , __UpperCamelCase : int=1_3 , __UpperCamelCase : Tuple=1_6 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Dict=True , __UpperCamelCase : List[str]=2 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[Any]=3_0 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : int=1 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=None , ):
        """Store all hyper-parameters used to build configs and inputs."""
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = decoder_seq_length
        # For common tests
        snake_case__ = self.decoder_seq_length
        snake_case__ = is_training
        snake_case__ = use_attention_mask
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = d_model
        snake_case__ = d_model
        snake_case__ = decoder_layers
        snake_case__ = decoder_layers
        snake_case__ = decoder_ffn_dim
        snake_case__ = decoder_attention_heads
        snake_case__ = decoder_attention_heads
        snake_case__ = eos_token_id
        snake_case__ = bos_token_id
        snake_case__ = pad_token_id
        snake_case__ = decoder_start_token_id
        snake_case__ = use_cache
        snake_case__ = max_position_embeddings
        snake_case__ = None
        snake_case__ = decoder_seq_length
        snake_case__ = 2
        snake_case__ = 1
    def __lowerCAmelCase( self : int ):
        """Create (config, input_ids, attention_mask, lm_labels) with random ids."""
        snake_case__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        snake_case__ = None
        if self.use_attention_mask:
            snake_case__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        snake_case__ = None
        if self.use_labels:
            snake_case__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        snake_case__ = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def __lowerCAmelCase( self : Any , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , ):
        """Check that decoding with a KV cache matches a full no-cache forward pass."""
        snake_case__ = True
        snake_case__ = TrOCRDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
        snake_case__ = input_ids[:2]
        # avoid pad token (id 0) so masking does not interfere with the comparison
        input_ids[input_ids == 0] += 1
        # first forward pass
        snake_case__ = model(__UpperCamelCase , use_cache=__UpperCamelCase )
        snake_case__ = model(__UpperCamelCase )
        snake_case__ = model(__UpperCamelCase , use_cache=__UpperCamelCase )
        self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
        self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
        snake_case__ = outputs["""past_key_values"""]
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case__ = model(__UpperCamelCase )["""last_hidden_state"""]
        snake_case__ = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
        # select random slice
        snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        snake_case__ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
    def __lowerCAmelCase( self : List[str] ):
        """Return (config, inputs_dict) for the common test mixins."""
        snake_case__ = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ , snake_case__ = config_and_inputs
        snake_case__ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_torch
class __magic_name__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    '''Standalone test suite for the TrOCR decoder / causal-LM head.

    NOTE(review): identifiers are machine-mangled; the class attributes below are
    all assigned to names like ``UpperCAmelCase_`` and would normally be
    ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping``, ``test_pruning`` and ``test_resize_embeddings``.
    '''
    UpperCAmelCase_ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    UpperCAmelCase_ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
    UpperCAmelCase_ : Any = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
    UpperCAmelCase_ : Optional[Any] = True
    UpperCAmelCase_ : Tuple = False
    def __lowerCAmelCase( self : str ):
        """Set up the model tester and config tester.

        NOTE(review): ``TrOCRStandaloneDecoderModelTester`` is not defined anywhere
        in this file (the tester class above was mangled to ``__magic_name__``) —
        this raises NameError as written; restore the tester's real name.
        """
        snake_case__ = TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCamelCase )
        snake_case__ = ConfigTester(self , config_class=__UpperCamelCase )
    def __lowerCAmelCase( self : Optional[Any] ):
        """Intentionally skipped (inputs_embeds not supported for this model)."""
        pass
    def __lowerCAmelCase( self : Union[str, Any] ):
        """Intentionally skipped."""
        pass
    def __lowerCAmelCase( self : Union[str, Any] ):
        """Intentionally skipped."""
        pass
    def __lowerCAmelCase( self : List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def __lowerCAmelCase( self : List[Any] ):
        """Check cached decoding matches uncached decoding."""
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*__UpperCamelCase )
    def __lowerCAmelCase( self : Union[str, Any] ):
        """Deliberate no-op: decoder cannot keep gradients without the rest of the model."""
        return
    @unittest.skip("""The model doesn't support left padding""" )  # and it's not used enough to be worth fixing :)
    def __lowerCAmelCase( self : Dict ):
        """Skipped: left-padding unsupported."""
        pass
| 566
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a__ = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
a__ = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
a__ = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def snake_case__ ( preds , labels ) -> float:
    """Return the fraction of predictions that exactly match the labels.

    Args:
        preds: numpy array (or array-like supporting ``==`` broadcasting) of predictions.
        labels: numpy array of gold labels, same shape as ``preds``.

    Returns:
        Accuracy in [0, 1] as a plain ``float``.

    Fixes over the mangled original: the signature repeated the parameter name
    ``a`` (a SyntaxError) while the body used ``preds``/``labels``; the wrong
    ``-> List[Any]`` annotation is corrected to ``float``.
    """
    return float((preds == labels).mean() )
def snake_case__ ( preds , labels ) -> dict:
    """Return both plain accuracy and the F1 score for the predictions.

    Args:
        preds: numpy array of predicted labels.
        labels: numpy array of gold labels, same shape as ``preds``.

    Returns:
        ``{"accuracy": float, "f1": float}``.

    Fixes over the mangled original: the signature repeated the parameter name
    ``a`` (a SyntaxError), and the body called an undefined ``simple_accuracy``
    helper — the accuracy computation is inlined here instead. ``fa_score`` is
    sklearn's ``f1_score`` imported at the top of the file.
    """
    acc = float((preds == labels).mean() )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def snake_case__ ( en_sentvecs , in_sentvecs ) -> float:
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, retrieve the 10 nearest Indic sentence
    vectors by cosine distance (after mean-centering each side) and count a hit
    when the aligned index ``i`` appears among them.

    Args:
        en_sentvecs: array-like of shape (n, d) — English sentence embeddings.
        in_sentvecs: array-like of shape (n, d) — aligned Indic sentence embeddings.

    Returns:
        Mean hit rate in [0, 1] as a plain ``float``.

    Fixes over the mangled original: the signature repeated the parameter name
    ``a`` (a SyntaxError) and every intermediate result was assigned to a
    throwaway local — the pipeline is restored end to end.
    """
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , """cosine""" )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__( datasets.Metric ):
    '''IndicGLUE metric: dispatches to accuracy, accuracy+F1 or precision@10
    depending on the configured subset name.

    NOTE(review): the helpers called below (``precision_at_aa``, ``acc_and_fa``,
    ``simple_accuracy``) are not defined under those names in this mangled file —
    the three functions above were all renamed to ``snake_case__``.
    '''
    def __lowerCAmelCase( self : str ):
        """Validate the config name and declare the metric's feature schema."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""" )
        # cvit-mkb-clsr takes float32 vectors; every other subset takes int64 labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" )
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""" ) ),
                    """references""": datasets.Value("""int64""" )
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""" ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def __lowerCAmelCase( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
        """Compute the metric for (predictions, references).

        NOTE(review): ``__UpperCamelCase`` is repeated as a parameter name
        (machine-mangled; SyntaxError in real Python).
        """
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(__UpperCamelCase , __UpperCamelCase )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(__UpperCamelCase , __UpperCamelCase )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""" )
| 566
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case_ ):
    '''Whisper-style audio feature extractor: pads/truncates raw speech to a fixed
    number of samples, then computes log-mel spectrogram features.

    NOTE(review): identifiers are machine-mangled — ``lowercase__`` is repeated as
    a parameter name in several signatures (a SyntaxError in real Python), and
    ``floataa``/``intaa`` look like mangled ``float32``/``int32``.
    '''
    # name of the model input produced by __call__
    __magic_name__ : List[Any] = ['''input_features''']
    def __init__( self : Optional[int] , lowercase__ : Any=80 , lowercase__ : List[Any]=1_6000 , lowercase__ : str=160 , lowercase__ : Optional[int]=30 , lowercase__ : Optional[Any]=400 , lowercase__ : str=0.0 , lowercase__ : int=False , **lowercase__ : List[Any] , ):
        """Store STFT/mel parameters and precompute the mel filter bank.

        Defaults correspond to: 80 mel bins, 16 kHz sampling, hop 160, 30 s chunks,
        n_fft 400, padding value 0.0.
        """
        super().__init__(
            feature_size=lowercase__ , sampling_rate=lowercase__ , padding_value=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
        a_ : Optional[int] = n_fft
        a_ : Optional[Any] = hop_length
        a_ : Optional[int] = chunk_length
        # total number of raw samples per chunk, and resulting number of frames
        a_ : Any = chunk_length * sampling_rate
        a_ : Any = self.n_samples // hop_length
        a_ : Tuple = sampling_rate
        a_ : Optional[Any] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowercase__ , norm="""slaney""" , mel_scale="""slaney""" , )
    def lowercase_ ( self : Optional[Any] , lowercase__ : np.array ):
        """Compute the log10 mel spectrogram of one waveform, clipped to an 8 dB
        dynamic range and rescaled into roughly [-1, 1]."""
        a_ : Dict = spectrogram(
            lowercase__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
        # drop the last frame to match the expected frame count
        a_ : str = log_spec[:, :-1]
        a_ : List[str] = np.maximum(lowercase__ , log_spec.max() - 8.0 )
        a_ : Union[str, Any] = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase_ ( lowercase__ : List[np.ndarray] , lowercase__ : List[np.ndarray] , lowercase__ : float = 0.0 ):
        """Normalize each waveform to zero mean / unit variance, using only the
        unpadded region when an attention mask is provided."""
        if attention_mask is not None:
            a_ : Tuple = np.array(lowercase__ , np.intaa )
            a_ : Optional[Any] = []
            for vector, length in zip(lowercase__ , attention_mask.sum(-1 ) ):
                # statistics computed over the first `length` (real) samples only
                a_ : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    a_ : Any = padding_value
                normed_input_values.append(lowercase__ )
        else:
            a_ : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self : Dict , lowercase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase__ : bool = True , lowercase__ : Optional[int] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[str] = "max_length" , lowercase__ : Optional[int] = None , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , **lowercase__ : List[Any] , ):
        """Featurize raw speech: validate sampling rate, batch, pad to n_samples,
        optionally zero-mean/unit-variance normalize, extract log-mel features,
        and optionally convert to the requested tensor type."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        a_ : Union[str, Any] = isinstance(lowercase__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        a_ : Optional[Any] = is_batched_numpy or (
            isinstance(lowercase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a_ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(lowercase__ , np.ndarray ):
            a_ : Optional[int] = np.asarray(lowercase__ , dtype=np.floataa )
        elif isinstance(lowercase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a_ : Optional[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a_ : Optional[Any] = [np.asarray([raw_speech] ).T]
        a_ : int = BatchFeature({"""input_features""": raw_speech} )
        # convert into correct format for padding
        a_ : Optional[int] = self.pad(
            lowercase__ , padding=lowercase__ , max_length=max_length if max_length else self.n_samples , truncation=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            a_ : int = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
            a_ : Union[str, Any] = np.stack(padded_inputs["""input_features"""] , axis=0 )
        # make sure list is in array format
        a_ : int = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
        a_ : Optional[Any] = [self._np_extract_fbank_features(lowercase__ ) for waveform in input_features[0]]
        if isinstance(input_features[0] , lowercase__ ):
            a_ : Optional[int] = [np.asarray(lowercase__ , dtype=np.floataa ) for feature in input_features]
        else:
            a_ : Dict = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            a_ : Optional[Any] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
        if return_tensors is not None:
            a_ : List[Any] = padded_inputs.convert_to_tensors(lowercase__ )
        return padded_inputs
    def lowercase_ ( self : List[str] ):
        """Serialize to a dict, dropping the (large, recomputable) mel filter bank."""
        a_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        a_ : Dict = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 442
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( checkpoint_url ):
    """Build a hybrid-embedding DPTConfig from hints in the checkpoint URL.

    Args:
        checkpoint_url: URL/path of the original checkpoint; the substrings
            "large", "nyu", "midas" and "ade" select the configuration.

    Returns:
        (config, expected_shape) — the DPTConfig and the expected output shape
        used later for sanity-checking the converted model.

    Fixes over the mangled original:
      * the parameter was named differently from the ``checkpoint_url`` the body
        reads (NameError);
      * ``if "nyu" or "midas" in checkpoint_url:`` was always true because the
        non-empty literal "nyu" is truthy — the membership test is now applied
        to both substrings;
      * computed values were assigned to throwaway locals instead of config
        attributes. NOTE(review): the attribute names below are reconstructed
        from the upstream DPT conversion script — confirm against DPTConfig.
    """
    config = DPTConfig(embedding_type="""hybrid""" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
a_ : Dict = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : str ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a_ : List[str] = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
a_ : Union[str, Any] = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
a_ : Optional[int] = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
a_ : List[str] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
a_ : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
a_ : Dict = name.replace("""proj""" , """projection""" )
if "blocks" in name:
a_ : Union[str, Any] = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
a_ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
a_ : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
a_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
a_ : int = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
a_ : Tuple = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
a_ : str = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
a_ : Optional[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
a_ : Dict = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
a_ : Tuple = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
a_ : str = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
a_ : List[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a_ : List[str] = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
a_ : Union[str, Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
a_ : Any = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
a_ : List[str] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
a_ : str = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
a_ : Dict = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a_ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
a_ : int = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
a_ : str = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
a_ : Optional[int] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
a_ : int = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
a_ : Any = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
a_ : int = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
a_ : Any = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
a_ : Union[str, Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
a_ : Any = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
a_ : Any = name.replace("""..""" , """.""" )
if "stem.conv" in name:
a_ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
a_ : Dict = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
a_ : str = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
a_ : Any = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
a_ : Dict = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
a_ : List[str] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
a_ : int = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def _SCREAMING_SNAKE_CASE ( state_dict , config ):
    """Split each layer's fused qkv projection into separate query/key/value entries.

    Args:
        state_dict: checkpoint state dict; modified in place.
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.

    NOTE(review): the original declared the same obfuscated name for both
    parameters (a SyntaxError) while the body read `config` and `state_dict`,
    and it assigned the q/k/v slices to a throwaway local instead of writing
    them back into the state dict. Parameter order follows the upstream DPT
    conversion script — confirm against the call site.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( ):
    """Download and return the standard COCO test image (two cats on a couch).

    Returns:
        PIL.Image.Image: the decoded image.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # `stream=True` keeps the response body as a raw file-like object so PIL can
    # decode it; the original passed an undefined name here (NameError at runtime).
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
    """Convert an original DPT checkpoint into the HuggingFace format and optionally save/push it.

    NOTE(review): the signature repeats one mangled parameter name five times
    (a SyntaxError), and the body reads `checkpoint_url`, `state_dict`, `val`,
    `model`, `image_processor`, `show_prediction`, `outputs`, `image`,
    `prediction`, `pytorch_dump_folder_path` and `push_to_hub`, none of which
    are bound here — the local/parameter names were destroyed by mangling.
    Restore them from the argparse block below before running.
    """
    a_ , a_ : Optional[Any] = get_dpt_config(UpperCamelCase__ )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    a_ : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(UpperCamelCase__ )
    # rename keys
    for key in state_dict.copy().keys():
        a_ : Any = state_dict.pop(UpperCamelCase__ )
        a_ : Tuple = val
    # read in qkv matrices
    read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
    # load HuggingFace model: "ade" checkpoints are segmentation heads, others depth estimation
    a_ : Any = DPTForSemanticSegmentation(UpperCamelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCamelCase__ )
    model.load_state_dict(UpperCamelCase__ )
    model.eval()
    # Check outputs on an image (ADE models were trained at 480px, depth models at 384px)
    a_ : str = 480 if """ade""" in checkpoint_url else 384
    a_ : Optional[int] = DPTImageProcessor(size=UpperCamelCase__ )
    a_ : Dict = prepare_img()
    a_ : Any = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
    # forward pass
    a_ : Dict = model(**UpperCamelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCamelCase__ ).predicted_depth
    if show_prediction:
        # upsample the raw prediction back to the input image resolution for display
        a_ : Any = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=UpperCamelCase__ , )
            .squeeze()
            .cpu()
            .numpy()
        )
        # Normalize to [0, 255] for display.
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
        print(F"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(UpperCamelCase__ )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
    # CLI entry point for the DPT conversion script.
    # NOTE(review): the parser is bound to a mangled name while the code below
    # reads `parser` and `args`, and `convert_dpt_checkpoint` is not the name
    # the conversion function above is defined under in this file.
    lowerCAmelCase_ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    parser.add_argument(
        '--show_prediction',
        action='store_true',
    )
    lowerCAmelCase_ : Optional[Any] = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 442
| 1
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Constants for the Hugging Face Hub CI (staging) environment used by these fixtures.
# NOTE(review): the original bound all seven values to one mangled name, so the
# later references to CI_HUB_USER / CI_HUB_USER_TOKEN / CI_HUB_ENDPOINT in this
# file raised NameError; the names below are restored from those references.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# The "(unknown)" text in the original was a corrupted "{filename}" placeholder
# (huggingface_hub's HUGGINGFACE_CO_URL_TEMPLATE format) — confirm upstream.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
# On-disk cache location for the CI token.
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
    # Redirect huggingface_hub's file-download URL template for a test.
    # NOTE(review): `monkeypatch` is read but the parameter was mangled to
    # SCREAMING_SNAKE_CASE_, and that parameter is passed as the new template
    # value — presumably it should be the CI URL-template constant; confirm.
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , SCREAMING_SNAKE_CASE_ )
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    # Point the `datasets` library at the CI hub endpoint for a test.
    # NOTE(review): `monkeypatch` is unbound here (the parameter name was
    # mangled), and both settings receive the same parameter value — upstream
    # they are two distinct CI constants; confirm before relying on this.
    monkeypatch.setattr('datasets.config.HF_ENDPOINT' , SCREAMING_SNAKE_CASE_ )
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , SCREAMING_SNAKE_CASE_ )
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
    # Patch where HfFolder looks for the cached token on disk.
    # NOTE(review): `monkeypatch` is unbound (parameter name mangled); the
    # patched value is presumably the CI token path constant — confirm.
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , SCREAMING_SNAKE_CASE_ )
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , _hub_setup ):
    """Install the given token for the duration of a test, then delete it.

    NOTE(review): the original declared the same name for both parameters (a
    SyntaxError). The second parameter is unused by the body, so it is kept
    only as a fixture-dependency placeholder — confirm the intended fixture.
    """
    HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
    yield
    # Always remove the token after the test so it cannot leak across tests.
    HfFolder.delete_token()
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Optional[Any]:
    """Session-scoped HfApi client pointed at the CI hub endpoint."""
    # The original passed an undefined name as the endpoint; CI_HUB_ENDPOINT is
    # the module-level constant this file references for exactly this purpose.
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    """Install the CI user token for the whole session; restore any prior token afterwards.

    NOTE(review): the original saved its (unrelated) parameter and read an
    unbound `previous_token`, so the restore path could never run; the
    save/restore pairing below is reconstructed from the yield/restore lines.
    """
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
    # Return a callable that deletes a dataset repo from the hub.
    # NOTE(review): `hf_api` is unbound and the repo id and token arguments
    # share one mangled name — upstream this fixture takes (hf_api, hf_token)
    # and the inner function takes repo_id; confirm before use.
    def _cleanup_repo(SCREAMING_SNAKE_CASE_ ):
        hf_api.delete_repo(SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ , repo_type='dataset' )
    return _cleanup_repo
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
    """Factory for a context manager that yields a repo id and always cleans it up.

    Args:
        SCREAMING_SNAKE_CASE_: the cleanup callable (a cleanup-repo fixture)
            invoked with the repo id on exit.
    """
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            # The original yielded an unbound `repo_id` and called an unbound
            # `cleanup_repo`; the inner parameter and the outer fixture argument
            # are the only names those references can resolve to.
            SCREAMING_SNAKE_CASE_(repo_id )
    return _temporary_repo
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( hf_api , hf_token , text_file ):
    """Create a private dataset repo holding one text file; yield its id; delete it afterwards.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); the names here follow the body's usage (an HfApi client, a
    token, and the local file to upload) — confirm the fixture names resolve.
    """
    repo_name = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    # `private=True` matches the fixture's "private dataset" purpose — confirm.
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo='data/text_data.txt' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def lowerCAmelCase__ ( hf_private_dataset_repo_txt_data_ , hub_config , hub_url ) -> Optional[Any]:
    """Per-test view of the session-scoped private text-data repo (id pass-through).

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); the first is restored from the body's return value, the
    other two are hub-configuration fixture placeholders — confirm.
    """
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( hf_api , hf_token , zip_file ):
    """Create a private dataset repo holding one zip archive of text data; yield its id; delete it afterwards.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); names restored from the body's usage — confirm they resolve
    to the intended fixtures.
    """
    repo_name = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    # `private=True` matches the fixture's "private dataset" purpose — confirm.
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_file ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def lowerCAmelCase__ ( hf_private_dataset_repo_zipped_txt_data_ , hub_config , hub_url ) -> Optional[int]:
    """Per-test view of the session-scoped private zipped-text repo (id pass-through).

    NOTE(review): duplicate parameter names in the original were a SyntaxError;
    the first is restored from the return value, the rest are placeholders.
    """
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( hf_api , hf_token , zip_file ):
    """Create a private dataset repo holding one zip archive of image data; yield its id; delete it afterwards.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); names restored from the body's usage — confirm they resolve
    to the intended fixtures.
    """
    repo_name = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    # `private=True` matches the fixture's "private dataset" purpose — confirm.
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_file ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def lowerCAmelCase__ ( hf_private_dataset_repo_zipped_img_data_ , hub_config , hub_url ) -> List[str]:
    """Per-test view of the session-scoped private zipped-image repo (id pass-through).

    NOTE(review): duplicate parameter names in the original were a SyntaxError;
    the first is restored from the return value, the rest are placeholders.
    """
    return hf_private_dataset_repo_zipped_img_data_
| 702
|
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    """Evaluate a fully parenthesized infix expression with single-digit operands.

    Dijkstra's two-stack algorithm: operands and operators go on separate
    stacks; every ``)`` reduces the top operator with the top two operands.

    Args:
        SCREAMING_SNAKE_CASE_: equation string, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``.

    Returns:
        The value of the expression (``/`` yields a float via ``op.truediv``).
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in SCREAMING_SNAKE_CASE_:
        if i.isdigit():
            # RULE 1: push operand digits
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push the operator character (the original pushed the whole
            # equation string here, and below passed the equation string as both
            # operands — both NameError/TypeError-class mangling bugs).
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: reduce — pop one operator and two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            rhs = operand_stack.peek()
            operand_stack.pop()
            lhs = operand_stack.peek()
            operand_stack.pop()
            # First-popped operand is the right-hand side (top of stack).
            operand_stack.push(operators[opr](lhs , rhs ) )
    # RULE 5: the single remaining operand is the result.
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: the expression below evaluates to 45.
    # NOTE(review): both `equation` and `dijkstras_two_stack_algorithm` are
    # unbound — the constant is assigned to a mangled name and the evaluator
    # above lost its original function name to identifier mangling.
    lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 69
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
SCREAMING_SNAKE_CASE = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
SCREAMING_SNAKE_CASE = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowercase_ ( preds , labels ) -> float:
    """Fraction of predictions equal to their labels.

    The original declared the same name for both parameters (a SyntaxError);
    they are restored from the body's element-wise comparison. The return
    annotation is corrected from ``int`` to ``float`` to match the body.

    Args:
        preds: array-like of predicted labels.
        labels: array-like of gold labels, same shape as ``preds``.

    Returns:
        float: mean of ``preds == labels``.
    """
    return float((preds == labels).mean() )
def lowercase_ ( preds , labels ) -> dict:
    """Accuracy plus F1 score (used for the MRPC/QQP subsets).

    NOTE(review): duplicate parameter names in the original were a SyntaxError;
    restored from usage. `fa_score` is this file's (mangled) import of
    sklearn's ``f1_score`` — confirm. Keyword names follow the original's
    ``y_true=`` / ``y_pred=`` call shape.
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def lowercase_ ( preds , labels ) -> dict:
    """Pearson and Spearman correlation between predictions and labels (STS-B).

    NOTE(review): duplicate parameter names in the original were a SyntaxError;
    restored from the symmetric use in the correlation calls.

    Returns:
        dict with keys ``"pearson"`` and ``"spearmanr"`` (both floats).
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """GLUE metric: validates the subset name and dispatches to accuracy, F1,
    Matthews correlation or Pearson/Spearman depending on ``config_name``.

    NOTE(review): both methods below are named ``A__`` (mangled), so the second
    definition shadows the first (upstream: ``_info`` and ``_compute``), and
    the second declares two parameters with the same name (a SyntaxError).
    """

    def A__ ( self : List[Any] ) -> Tuple:
        """Validate the config name and declare input features.

        ``stsb`` is a regression task, so its inputs are float32; every other
        subset uses int64 labels.
        """
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )

    def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
        """Compute the configured subset's metric(s) from predictions/references."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(UpperCAmelCase , UpperCAmelCase )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(UpperCAmelCase , UpperCAmelCase )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 94
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the RoCBert model/tokenizer modules.
# NOTE(review): several mangling inconsistencies below — the structure dict is
# bound to `A_` but the final `_LazyModule` call reads `_import_structure`
# (NameError); the tokenizers-available branch is a no-op `pass`; the model
# list overwrites the dict by reusing `A_`; and inside TYPE_CHECKING the
# tokenizers `else:` branch *raises* when the dependency IS available.
A_ : List[str] = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream registers the fast tokenizer here; this `pass`
    # looks like lost code.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds `A_`, clobbering the structure dict above.
    A_ : Union[str, Any] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    # Static type checkers import everything eagerly.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): inverted — raises precisely when tokenizers are present.
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys

    A_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__ :
    """Helper that builds tiny DeiT configs/inputs and runs TF shape checks.

    NOTE(review): identifier mangling replaced every ``self.<attr> = <param>``
    target with a local ``UpperCamelCase__``, gave all 18 ``__init__``
    parameters the same name (a SyntaxError), and gave every method one shared
    name so later defs shadow earlier ones — the upstream method names
    (prepare_config_and_inputs, get_config, create_and_check_*) referenced by
    the test class below are no longer defined.
    """

    def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=30, __magic_name__=2, __magic_name__=3, __magic_name__=True, __magic_name__=True, __magic_name__=32, __magic_name__=2, __magic_name__=4, __magic_name__=37, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=10, __magic_name__=0.02, __magic_name__=3, __magic_name__=None, __magic_name__=2, ) -> Tuple:
        """Store hyper-parameters and derive the expected sequence length."""
        UpperCamelCase__ : Tuple = parent
        UpperCamelCase__ : Union[str, Any] = batch_size
        UpperCamelCase__ : List[str] = image_size
        UpperCamelCase__ : int = patch_size
        UpperCamelCase__ : Tuple = num_channels
        UpperCamelCase__ : Dict = is_training
        UpperCamelCase__ : Optional[Any] = use_labels
        UpperCamelCase__ : str = hidden_size
        UpperCamelCase__ : Any = num_hidden_layers
        UpperCamelCase__ : Union[str, Any] = num_attention_heads
        UpperCamelCase__ : Optional[int] = intermediate_size
        UpperCamelCase__ : int = hidden_act
        UpperCamelCase__ : Optional[int] = hidden_dropout_prob
        UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob
        UpperCamelCase__ : List[str] = type_sequence_label_size
        UpperCamelCase__ : List[str] = initializer_range
        UpperCamelCase__ : str = scope
        UpperCamelCase__ : Dict = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        UpperCamelCase__ : Optional[Any] = (image_size // patch_size) ** 2
        UpperCamelCase__ : Optional[int] = num_patches + 2

    def UpperCamelCase__ ( self ) -> str:
        """Build (config, pixel_values, labels) for a test case."""
        UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase__ : Optional[int] = None
        if self.use_labels:
            UpperCamelCase__ : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
        UpperCamelCase__ : Tuple = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase__ ( self ) -> int:
        """Build a small DeiTConfig from the stored hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__magic_name__, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Tuple:
        """Base-model forward pass: check the last hidden state shape."""
        UpperCamelCase__ : Any = TFDeiTModel(config=__magic_name__ )
        UpperCamelCase__ : Any = model(__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Tuple:
        """Masked-image-modeling head: check reconstruction shape, incl. greyscale."""
        UpperCamelCase__ : int = TFDeiTForMaskedImageModeling(config=__magic_name__ )
        UpperCamelCase__ : List[str] = model(__magic_name__ )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        UpperCamelCase__ : Tuple = 1
        UpperCamelCase__ : int = TFDeiTForMaskedImageModeling(__magic_name__ )
        UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : Union[str, Any] = model(__magic_name__ )
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )

    def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
        """Classification head: check the logits shape, incl. greyscale."""
        UpperCamelCase__ : Union[str, Any] = self.type_sequence_label_size
        UpperCamelCase__ : str = TFDeiTForImageClassification(__magic_name__ )
        UpperCamelCase__ : Union[str, Any] = model(__magic_name__, labels=__magic_name__ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase__ : str = 1
        UpperCamelCase__ : Optional[Any] = TFDeiTForImageClassification(__magic_name__ )
        UpperCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : int = model(__magic_name__, labels=__magic_name__ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

    def UpperCamelCase__ ( self ) -> str:
        """Adapt prepare_config_and_inputs to the common test harness's dict shape."""
        UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = config_and_inputs
        UpperCamelCase__ : Dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common-test suite for the TF DeiT models.

    NOTE(review): the two mixin bases were mangled to ``__lowerCamelCase``
    (upstream: TFModelTesterMixin, PipelineTesterMixin), this class reuses the
    name ``lowercase__`` of the tester class above, all class attributes are
    bound to one name ``a``, and every method shares the mangled name
    ``UpperCamelCase__`` so later definitions shadow earlier ones.
    """

    # All model classes / pipeline mapping exercised by the common tests.
    a : str = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    a : Tuple = (
        {
            """feature-extraction""": TFDeiTModel,
            """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    # Feature flags for the common test harness (all disabled here).
    a : Union[str, Any] = False
    a : List[str] = False
    a : List[str] = False
    a : List[Any] = False

    def UpperCamelCase__ ( self ) -> int:
        """Create the model tester and config tester."""
        UpperCamelCase__ : int = TFDeiTModelTester(self )
        UpperCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 )

    def UpperCamelCase__ ( self ) -> Dict:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """Skipped: DeiT consumes pixel values, not input embeddings."""
        pass

    def UpperCamelCase__ ( self ) -> Tuple:
        """Input embeddings must be a Keras layer; output embeddings Dense or None."""
        UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : str = model_class(__magic_name__ )
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
            UpperCamelCase__ : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__magic_name__, tf.keras.layers.Dense ) )

    def UpperCamelCase__ ( self ) -> Tuple:
        """The forward signature's first argument must be `pixel_values`."""
        UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : Any = model_class(__magic_name__ )
            UpperCamelCase__ : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase__ : Any = [*signature.parameters.keys()]
            UpperCamelCase__ : List[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __magic_name__ )

    def UpperCamelCase__ ( self ) -> Tuple:
        """Run the base-model shape check from the tester."""
        UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        """Run the masked-image-modeling shape check from the tester."""
        UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )

    def UpperCamelCase__ ( self ) -> int:
        """Run the image-classification shape check from the tester."""
        UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__ )

    def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=False ) -> int:
        """Drop `labels` for model classes whose call() does not accept them."""
        UpperCamelCase__ : Optional[Any] = super()._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def UpperCamelCase__ ( self ) -> Tuple:
        """Smoke-test from_pretrained on the first archived checkpoint."""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase__ : Any = TFDeiTModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )
def lowerCAmelCase_ ( ) -> List[Any]:
    """Load and return the COCO cats fixture image used by the integration test."""
    fixture_image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return fixture_image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
    """Slow integration test: run the distilled DeiT checkpoint on a fixture image.

    NOTE(review): this class reuses the mangled name ``lowercase__`` of the
    classes above (the last definition wins at module scope), and several
    locals below (`model`, `image_processor`, `inputs`, `outputs`) are read
    but bound only to the throwaway name ``UpperCamelCase__``.
    """

    @cached_property
    def UpperCamelCase__ ( self ) -> int:
        """Image processor for the distilled checkpoint (None without vision deps)."""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """End-to-end logits check against hard-coded reference values."""
        UpperCamelCase__ : int = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
        UpperCamelCase__ : Tuple = self.default_image_processor
        UpperCamelCase__ : Union[str, Any] = prepare_img()
        UpperCamelCase__ : Optional[Any] = image_processor(images=__magic_name__, return_tensors='''tf''' )
        # forward pass
        UpperCamelCase__ : List[str] = model(**__magic_name__ )
        # verify the logits
        UpperCamelCase__ : Optional[Any] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, __magic_name__ )
        UpperCamelCase__ : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
| 716
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[str] , __UpperCAmelCase: Dict , __UpperCAmelCase: Tuple , __UpperCAmelCase: str ) -> Optional[int]:
    """Convert a TensorFlow TAPAS checkpoint to a PyTorch model + tokenizer.

    NOTE(review): the five parameters share one mangled name (a SyntaxError)
    while the body reads `task`, `reset_position_index_per_cell`, `config`,
    `model`, `tokenizer`, `tf_checkpoint_path` and `pytorch_dump_path`;
    restore them from the argparse block below before running. The task-hparam
    assignments are likewise bound to a throwaway local instead of config
    attributes.
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    UpperCamelCase__ : Any = TapasConfig.from_json_file(__UpperCAmelCase )
    # set absolute/relative position embeddings parameter
    UpperCamelCase__ : Optional[Any] = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        UpperCamelCase__ : List[Any] = TapasForQuestionAnswering(config=__UpperCAmelCase )
    elif task == "WTQ":
        # run_task_main.py hparams
        UpperCamelCase__ : Any = 4
        UpperCamelCase__ : List[str] = True
        # hparam_utils.py hparams
        UpperCamelCase__ : int = 0.664694
        UpperCamelCase__ : Union[str, Any] = 0.207951
        UpperCamelCase__ : Any = 0.121194
        UpperCamelCase__ : int = True
        UpperCamelCase__ : Any = True
        UpperCamelCase__ : str = False
        UpperCamelCase__ : int = 0.0352513
        UpperCamelCase__ : Dict = TapasForQuestionAnswering(config=__UpperCAmelCase )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        UpperCamelCase__ : Any = 4
        UpperCamelCase__ : List[str] = False
        # hparam_utils.py hparams
        UpperCamelCase__ : int = 36.4519
        UpperCamelCase__ : int = 0.903421
        UpperCamelCase__ : List[str] = 222.088
        UpperCamelCase__ : Dict = True
        UpperCamelCase__ : List[str] = True
        UpperCamelCase__ : Any = True
        UpperCamelCase__ : int = 0.763141
        UpperCamelCase__ : Union[str, Any] = TapasForQuestionAnswering(config=__UpperCAmelCase )
    elif task == "TABFACT":
        UpperCamelCase__ : List[Any] = TapasForSequenceClassification(config=__UpperCAmelCase )
    elif task == "MLM":
        UpperCamelCase__ : Optional[Any] = TapasForMaskedLM(config=__UpperCAmelCase )
    elif task == "INTERMEDIATE_PRETRAINING":
        UpperCamelCase__ : str = TapasModel(config=__UpperCAmelCase )
    else:
        raise ValueError(f"Task {task} not supported." )
    print(f"Building PyTorch model from configuration: {config}" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(__UpperCAmelCase )
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}" )
    UpperCamelCase__ : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
    tokenizer.save_pretrained(__UpperCAmelCase )
    print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    # CLI wiring for the TAPAS conversion above.
    # NOTE(review): the parser/args are bound to a mangled name while the code
    # below reads `parser`/`args`, and `convert_tf_checkpoint_to_pytorch` is
    # not the (mangled) name the conversion function is defined under here.
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to True.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
| 369
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __lowerCamelCase ( ) ->Tuple:
    """Entry point for the `diffusers-cli` command.

    Builds the argument parser, registers subcommands, dispatches to the
    selected command's service object, and runs it.

    NOTE(review): the original bound every local to one mangled name while
    reading `parser` and an unbound args reference, so the help/dispatch paths
    raised NameError; the locals below are restored from those reads.
    """
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        # No subcommand given: show usage and exit with an error code.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the entry point above lost its
    # original name to identifier mangling.
    main()
| 368
|
'''simple docstring'''
def __lowerCamelCase ( UpperCAmelCase_ = 10_00 ) ->int:
    """Return the sum of all multiples of 3 or 5 strictly below ``UpperCAmelCase_``."""
    total = 0
    for candidate in range(3 , UpperCAmelCase_ ):
        if candidate % 3 == 0 or candidate % 5 == 0:
            total += candidate
    return total
if __name__ == "__main__":
    # Fix: the solution function in this file is (obfuscated-)named
    # `__lowerCamelCase`; the previously referenced `solution` does not exist.
    print(f"""{__lowerCamelCase() = }""")
| 368
| 1
|
'''simple docstring'''
def __magic_name__( k, n):
    """Return the k-th (0-based) lexicographic permutation of ``0..n-1``.

    Uses the factorial number system: each digit of ``k`` in factorial base
    selects the next element of the remaining pool.

    Fix: the original declared both parameters with the same obfuscated name
    (``lowerCamelCase``), which is a SyntaxError; the body's ``assert`` still
    referenced ``k`` and ``n``, so those names are restored.

    :param k: permutation index, ``0 <= k < n!``
    :param n: number of elements to permute
    :raises AssertionError: if ``k`` is out of bounds
    """
    factorials = [1]
    # factorials[i] == (i+1)!  (only up to (n-1)! is needed)
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation: peel off one factorial-base digit per position.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 474
|
'''simple docstring'''
def __magic_name__( ):
    """Project Euler 9: product a*b*c of the Pythagorean triplet with a+b+c = 1000.

    Fix: the inner range previously started at the undefined name
    ``lowerCamelCase`` (NameError); it must start at ``a`` so each pair is
    considered once with a <= b.
    """
    return [
        a * b * (1_0_0_0 - a - b)
        for a in range(1, 9_9_9)
        for b in range(a, 9_9_9)
        # c is implied by c = 1000 - a - b; keep only right triangles.
        if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
    # Fix: the solution function in this file is (obfuscated-)named
    # `__magic_name__`; the previously referenced `solution` does not exist.
    print(f"""{__magic_name__() = }""")
| 474
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : str = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Union[str, Any] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Any = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__magic_name__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 281
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( lowercase__ :Optional[Any], lowercase__ :List[str]=0.999, lowercase__ :Optional[int]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ :str ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ :Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(lowercase__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ), lowercase__ ) )
return torch.tensor(lowercase__, dtype=torch.floataa )
class __snake_case (SchedulerMixin , ConfigMixin ):
    """Heun's second-order discrete scheduler (k-diffusion style).

    Alternates a first-order (Euler) stage and a second-order correction
    stage per timestep; interior sigmas/timesteps are therefore duplicated.

    NOTE(review): the obfuscated original bound every class attribute to
    ``__a``, every method to ``__a`` and every parameter to ``A_`` (duplicate
    parameter names are a SyntaxError), and its base classes were the
    undefined name ``lowerCamelCase``. Names are restored to the standard
    diffusers scheduler interface (``set_timesteps`` / ``scale_model_input``
    / ``step`` / ``add_noise``); ``torch.floataa`` / ``np.floataa`` do not
    exist and are restored to ``float32``.
    """

    # Schedulers this one can be swapped with (used by diffusers pipelines).
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    # Second-order method: two model evaluations per output step.
    order = 2

    @register_to_config
    def __init__( self, num_train_timesteps: int = 10_00 , beta_start: float = 0.00_085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
        """Build the beta/alpha schedule and initialise the sigma/timestep tables."""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="""cosine""" )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="""exp""" )
        else:
            raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )

        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep( self, timestep, schedule_timesteps=None ):
        """Map a timestep value to its index in the (duplicated) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma( self ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input( self, sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ):
        """Scale input by 1/sqrt(sigma^2 + 1) so the model sees unit variance."""
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps( self, num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ):
        """Precompute the timestep and sigma schedules for inference."""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=np.float32 )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.float32 )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(np.float32 )
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )

        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # Duplicate interior sigmas: first- and second-order stages share the schedule.
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )

        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )

        if str(device ).startswith("""mps""" ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )

    def _sigma_to_t( self, sigma, log_sigmas ):
        """Interpolate a (possibly Karras) sigma back to a fractional timestep."""
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t

    def _convert_to_karras( self, in_sigmas: torch.FloatTensor , num_inference_steps ):
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order( self ):
        # `dt` is cleared after every completed (two-stage) step.
        return self.dt is None

    def step( self, model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        """One scheduler step: Euler stage, then Heun correction stage."""
        step_index = self.index_for_timestep(timestep )

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method: average the two derivatives
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )

    def add_noise( self, original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        """Diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )

        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__( self ):
        return self.config.num_train_timesteps
| 281
| 1
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece-Unigram tokenizer built on the `tokenizers` library.

    Applies NMT/NFKC normalization, whitespace collapsing and lower-casing,
    Metaspace + digit + punctuation pre-tokenization, and appends the EOS
    token after every sequence.

    NOTE(review): the obfuscated original declared every parameter with the
    same name (a SyntaxError), lost the assignment targets for the tokenizer
    components, and extended the undefined base ``lowerCAmelCase__``; names
    are restored (``BaseTokenizer`` is imported at the top of this file and
    ``self.add_unk_id()`` fixed the intended method names).
    """

    def __init__( self , replacement: str = "▁" , add_prefix_space: bool = True , unk_token: Union[str, AddedToken] = "<unk>" , eos_token: Union[str, AddedToken] = "</s>" , pad_token: Union[str, AddedToken] = "<pad>" , ):
        # Fixed ids for the special tokens; the trainer must emit them in this order.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        # List form of the special tokens, indexed by their id.
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram() )

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ) , " " ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )

        # Always terminate a single sequence with the EOS token.
        tokenizer.post_processor = TemplateProcessing(
            single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer , parameters )

    def train( self , files: Union[str, List[str]] , vocab_size: int = 8_000 , show_progress: bool = True , ):
        """Train the Unigram model from one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )

        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )

        self.add_unk_id()

    def train_from_iterator( self , iterator: Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size: int = 8_000 , show_progress: bool = True , ):
        """Train the Unigram model from an iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )

        self._tokenizer.train_from_iterator(iterator , trainer=trainer )

        self.add_unk_id()

    def add_unk_id( self ):
        """Patch the serialized model so the UNK token id is set after training."""
        tokenizer_json = json.loads(self._tokenizer.to_str() )

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )


# Preserve the module-level name the obfuscated file bound for this class.
_a = SentencePieceUnigramTokenizer
| 709
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    """Config tester for `CvtConfig`.

    The generic common-properties check probes text-model attributes; Cvt is
    vision-only, so only its required attributes are verified here.

    Fix: the original extended the undefined name ``lowerCAmelCase__``; the
    intended base, ``ConfigTester``, is imported at the top of this file.
    """

    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "embed_dim" ) )
        self.parent.assertTrue(hasattr(config , "num_heads" ) )


# Preserve the module-level name the obfuscated file bound for this class.
_a = CvtConfigTester
class CvtModelTester:
    """Builds tiny `CvtConfig`s plus random inputs and runs shape checks.

    NOTE(review): the obfuscated ``__init__`` declared every parameter with
    the same name (a SyntaxError) and the method bodies referenced undefined
    names; parameter names are restored from the order of the defaults and
    the attribute assignments in the original body.
    """

    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        # NOTE: the list defaults are shared between instances but never mutated here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs( self ):
        """Random pixel values (+ labels when used) and a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # Each stage downsamples: out = floor((in + 2*pad - kernel) / stride) + 1
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Preserve the module-level name the obfuscated file bound for this class;
# the test class below instantiates `CvtModelTester` by its real name.
_a = CvtModelTester
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Cvt.

    NOTE(review): the obfuscated original extended the undefined name
    ``lowerCAmelCase__`` (the intended mixins are imported at the top of
    this file), bound every class attribute and method to the same name
    (shadowing all but the last), and ``test_config`` confirmed the intended
    method names, which are restored here.
    """

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    @unittest.skip(reason="Cvt does not output attentions" )
    def test_attention_outputs( self ):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ):
        pass

    @slow
    def test_model_from_pretrained( self ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


# Preserve the module-level name the obfuscated file bound for this class.
_a = CvtModelTest
def lowerCamelCase_ ( ) -> "Image.Image":
    """Load the repo's fixture cat image used by the slow integration test.

    Fix: the original return annotation claimed ``Dict``; the function
    returns a PIL image object.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check: reference Cvt checkpoint on the fixture image.

    NOTE(review): the obfuscated original bound both methods to the same
    name and passed undefined names to ``.to(...)``; the intended names
    (``default_image_processor``, confirmed by the body's own reference, and
    ``torch_device``, imported at the top of this file) are restored.
    """

    @cached_property
    def default_image_processor( self ):
        # Image processor matching the first (reference) Cvt checkpoint.
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head( self ):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )

        image_processor = self.default_image_processor
        # The image-loading helper in this file is (obfuscated-)named `lowerCamelCase_`.
        image = lowerCamelCase_()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )


# Preserve the module-level name the obfuscated file bound for this class.
_a = CvtModelIntegrationTest
| 387
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    """Shared test harness for `PretrainedConfig` subclasses.

    NOTE(review): the obfuscated original extended the undefined name
    ``A__``, declared every ``__init__`` parameter with the same name (a
    SyntaxError), named every method ``SCREAMING_SNAKE_CASE`` (so they
    shadowed each other) while the final method called the real names, and
    referenced several undefined locals; those names are restored here.
    """

    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        # Remaining keyword arguments become the config constructor inputs.
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties( self ):
        """Check the config exposes the expected common properties as getters/setters."""
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"`{prop}` does not exist" )

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string( self ):
        """Round-trip the inputs through `to_json_string`."""
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )

    def create_and_test_config_to_json_file( self ):
        """Round-trip the config through `to_json_file` / `from_json_file`."""
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "config.json" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained( self ):
        """Round-trip the config through `save_pretrained` / `from_pretrained`."""
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        """Same round-trip, but saving/loading from a subfolder."""
        config_first = self.config_class(**self.inputs_dict )
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configpath )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_with_num_labels( self ):
        """`num_labels` must resize the label maps, at init and on assignment."""
        # NOTE(review): this repo's config attribute names are obfuscated as
        # `idalabel` / `labelaid` (id2label / label2id upstream); kept as-is.
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )

        config.num_labels = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )

    def check_config_arguments_init( self ):
        """Every key in `config_common_kwargs` must be honoured by the constructor."""
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # config_common_kwargs requests the half-precision dtype.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )

        if len(wrong_values ) > 0:
            errors = "\n".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )

    def run_common_tests( self ):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()


# Preserve the module-level name the obfuscated file bound for this class.
_a = ConfigTester
| 408
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( config , base_model=False ) -> list:
    """Build the (timm_name -> hf_name) rename map for DeiT checkpoints.

    Fixes over the previous version: both parameters shared one obfuscated
    name (a SyntaxError) and the return annotation claimed ``int``; the
    function returns a list of ``(old, new)`` tuples.

    :param config: object exposing ``num_hidden_layers``
    :param base_model: if True, emit keys without the "deit." prefix and
        include the layernorm/pooler head instead of the classifiers
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ] )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.{weight,bias}`` and
    inserts the corresponding ``...attention.attention.{query,key,value}.{weight,bias}``
    keys, prefixed with ``"deit."`` unless ``base_model`` is True.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new``, in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats test image and return it as a PIL image.

    Used only as a sanity-check input for the converted model.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the raw HTTP response
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint into the HF DeiT format and save it.

    Args:
        deit_name: timm model identifier, e.g. ``vit_deit_base_distilled_patch16_224``.
        pytorch_dump_folder_path: directory where model + image processor are saved.

    Raises:
        AssertionError: if the HF model's logits don't match the timm model's.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # patch size and image size are encoded at the end of the timm name, e.g. ...patch16_224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        # DeiTConfig defaults already correspond to the base architecture
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: convert a timm DeiT checkpoint to the HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 408
| 1
|
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('List is empty' )
SCREAMING_SNAKE_CASE_ : Optional[int] = sum(__lowerCAmelCase ) / len(__lowerCAmelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__lowerCAmelCase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 712
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string avoids invalid-escape warnings for `\[` / `\.`)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempted from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Verify every config class docstring links a valid HF Hub checkpoint.

    Raises:
        ValueError: listing all config classes (not in the ignore set) whose
            docstring contains no checkpoint link matching its name.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
    # Fail CI if any config class docstring lacks a valid checkpoint link.
    check_config_docstrings_have_checkpoints()
| 311
| 0
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE : List[Any] = get_logger(__name__)
class lowercase_ :
    """Mock download manager that resolves URLs to paths inside a dummy-data archive.

    Used to test dataset scripts against small local "dummy_data.zip" fixtures
    instead of downloading real data.
    """

    # name of the (extracted) dummy-data directory inside the zip
    dummy_file_name = 'dummy_data'
    # root directory holding the per-dataset scripts (and their dummy zips)
    datasets_scripts_dir = 'datasets'
    # NOTE(review): attribute name inferred from the `datasets` library; not referenced below
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # lazily download/extract the dummy data the first time it is needed
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (locally or from GitHub) and extract the dummy zip; return its root path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map ``data_url`` (str, list/tuple or dict of urls) to dummy-data path(s)."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # dummy data is already extracted
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield ``(relative_posix_path, file_object)`` pairs for members of an archive."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every regular file under ``paths``, skipping hidden/dunder entries."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 670
|
import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def quick_select(items, index):
    """Return the element that would be at position ``index`` in sorted ``items``.

    Uses the randomized quickselect algorithm (expected O(n)).
    Returns ``None`` when ``index`` is out of range.
    """
    if index >= len(items) or index < 0:
        return None

    # random pivot keeps the expected running time linear
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 670
| 1
|
def solution(n: int = 4_000_000):
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``.

    Project Euler problem 2; default limit is four million.
    """
    fib = [0, 1]
    i = 0
    # grow the Fibonacci sequence until the next term would exceed n
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    # the last appended term may exceed n, so it is excluded from the sum
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
    # Print the Project Euler answer when run as a script.
    print(f'''{solution() = }''')
| 711
|
def ugly_numbers(n: int) -> int:
    """Return the ``n``-th ugly number (numbers whose only prime factors are 2, 3, 5).

    The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...; ``n`` is 1-based.
    """
    ugly_nums = [1]
    # pointers into ugly_nums for the next multiple of 2, 3 and 5
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        # advance every pointer that produced next_num (avoids duplicates)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    # Run doctests, then print the 200th ugly number.
    from doctest import testmod
    testmod(verbose=True)
    print(f'''{ugly_numbers(200) = }''')
| 146
| 0
|
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the byte order of a 32-char binary/hex string, 8 chars at a time.

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of the low 32 bits of ``i``.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad a message to the MD5 512-bit block format.

    The message is converted to a bit string, a 1-bit plus zero padding is
    appended until the length is 448 mod 512, then the original length (as a
    little-endian 64-bit value) is appended.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # original length must be captured before padding
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Yield each 512-bit block of ``bit_string`` as a list of sixteen 32-bit ints.

    Raises:
        ValueError: if the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that\'s a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Return the bitwise NOT of ``i`` interpreted as a 32-bit value.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Return ``a + b`` modulo 2**32 (32-bit wraparound addition)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Raises:
        ValueError: if ``i`` or ``shift`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as lowercase hex bytes.

    Implements the RFC 1321 algorithm using the helper functions above
    (preprocess, get_block_words, not_aa, sum_aa, left_rotate_aa, reformat_hex).
    """
    bit_string = preprocess(message)

    # per-round additive constants: floor(2^32 * |sin(i + 1)|)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xefcdab89
    c0 = 0x98badcfe
    d0 = 0x10325476

    # per-round left-rotation amounts (four groups of four, each repeated 4x)
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 82
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER on a mapped dataset and write results to text files.

    Args:
        result: dataset with "target" and "prediction" columns.
        args: parsed CLI namespace (dataset/config/split/log_outputs attrs).
    """
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(f"""{dataset_id}_eval_results.txt""", 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"""{i}""" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Lowercase ``text``, strip ignored punctuation and collapse whitespace runs."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '  ', ' ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    """Run ASR inference over a dataset split and log WER/CER results."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # Command-line entry point for the speech-recognition evaluation script.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 586
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Convert alternating ``["--key", "value", ...]`` CLI leftovers into a dict.

    Leading dashes are stripped from keys; values are kept as-is.
    """
    return {key.lstrip("""-"""): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point of the ``datasets-cli`` tool: register subcommands, parse, run."""
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""", usage="""datasets-cli <command> [<args>]""", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    # Dispatch to the datasets-cli entry point.
    main()
| 715
|
"""simple docstring"""
# First cell injected into generated notebooks: installs the required packages.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder substitutions used so that black can format doc example templates.
black_avoid_patterns = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 74
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
a_ : str = logging.getLogger(__name__)
a_ : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a_ : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )

    def __post_init__(self):
        # config_overrides only makes sense when building a config from scratch
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}
    )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )

    def __post_init__(self):
        # only csv/json/txt inputs are supported by this script
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase__ (dataset, ref_file):
    """Attach Chinese whole-word-masking references to a dataset.

    Reads `ref_file` (one JSON-encoded list per non-empty line, one line per
    example) and returns a new ``Dataset`` with those lists added as an extra
    column.

    The obfuscated original declared two parameters with the same name — a
    SyntaxError — and compared ``len()`` of the same object to itself; both are
    repaired here.
    """
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    # Every example must have exactly one matching reference line.
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    # NOTE(review): the target column name was lost in obfuscation; "chinese_ref"
    # is what DataCollatorForWholeWordMask consumes — confirm against the collator.
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def lowerCamelCase__ ():
    """Train/evaluate a masked language model with whole-word masking.

    Parses CLI (or JSON) arguments, loads datasets and model, tokenizes,
    optionally attaches Chinese whole-word-masking references, trains, and
    evaluates perplexity.  Returns the dict of evaluation metrics.

    NOTE(review): the obfuscated original collapsed every local variable into the
    single name `SCREAMING_SNAKE_CASE`, which made the function non-runnable.  The
    locals below restore distinct names derived from the attribute accesses that
    survived in the original body (e.g. `training_args.output_dir`); control flow
    and every runtime string are unchanged.  `training_args.fpaa` was a mangled
    `fp16` and is repaired.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[:{data_args.validation_split_percentage}%]',
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[{data_args.validation_split_percentage}%:]',
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.')

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)

    # Make sure the embedding matrix covers the tokenizer's vocabulary.
    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'] if training_args.do_train else None,
        eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')
    return results
def lowerCamelCase__ (_UpperCAmelCase):
    # For xla_spawn (TPUs)
    # NOTE(review): the single argument is presumably the process index supplied by
    # xla_spawn and is intentionally unused — TODO confirm.  `main` is not defined
    # under that name in this file (the entry point above was obfuscated to
    # `lowerCamelCase__`), so this call target needs restoring upstream.
    main()

# Script entry point; same unresolved `main` name as above.
if __name__ == "__main__":
    main()
| 73
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowerCAmelCase( UpperCAmelCase_ ):
    """
    Output class for the semantic Stable Diffusion pipeline.

    Attributes:
        images: denoised PIL images (or an ndarray of shape (batch, h, w, c)).
        nsfw_content_detected: per-image NSFW flags, or None if the safety
            checker was not run.

    NOTE(review): the obfuscated original declared both fields under the same name
    `a`, so the second annotation shadowed the first; the field names are restored
    to match the standard diffusers pipeline-output contract — confirm against the
    pipeline that constructs this object.
    """

    # One entry per generated image.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # One boolean per image, or None when safety checking is disabled.
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 57
| 0
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a txt2img UnCLIP (Karlo) checkpoint into an UnCLIPImageVariationPipeline.
    #
    # Fixes vs the obfuscated original: the parsed-args attribute is
    # `txt2img_unclip` (the original read the mangled `args.txtaimg_unclip`,
    # an AttributeError), and the constructed pipeline is bound to a real
    # variable before `save_pretrained` (the original saved via the undefined
    # name `imgaimg`).
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Source pipeline whose components are reused for image variation.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # Image-conditioning components replace the text-to-image front end.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 625
|
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
    """Return the sum of even-valued Fibonacci numbers not exceeding the limit.

    Project Euler problem 2.

    Args:
        __lowerCamelCase: inclusive upper bound for Fibonacci values considered.

    Fixes vs the obfuscated original: the accumulator list and the Fibonacci pair
    had been collapsed into one unusable name, the loop compared against an
    undefined `n`, and the limit (not the Fibonacci value) was being appended.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= __lowerCamelCase:
        if b % 2 == 0:
            even_fibs.append(b)
        # Advance the Fibonacci pair.
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    # The original printed the undefined name `solution`; call the actual function.
    print(f'{snake_case__() = }')
| 625
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hosted config.json.
# NOTE(review): both constants here were obfuscated to the same name `lowercase`,
# so this dict shadows the logger above — distinct names need restoring upstream.
lowercase = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}
class __lowercase ( lowerCAmelCase__ ):
    """Configuration class for Data2Vec-Vision models.

    Stores hyperparameters for the encoder plus the semantic-segmentation decode
    and auxiliary heads.

    NOTE(review): the obfuscated original declared all 27 `__init__` parameters
    under the single name `_a` (a SyntaxError) and forwarded the undefined `_A`
    to `super().__init__`.  Parameter names below are restored directly from the
    `self.X = ...` assignments the original body already contained; defaults are
    kept byte-for-byte.
    """

    # `model_type` identifies this config in the auto-class registries
    # (restored from the obfuscated class attribute `_A`).
    model_type = '''data2vec-vision'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase ( lowerCAmelCase__ ):
    """ONNX export configuration for Data2Vec-Vision.

    NOTE(review): the obfuscated original named both properties `A_`, so the
    second silently shadowed the first; the property names below are restored to
    the ones the transformers ONNX exporter looks up (`inputs`,
    `atol_for_validation`).  This class also shares its obfuscated name with the
    model config class above — distinct class names need restoring upstream.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Axis names for the exported graph's pixel-value input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1E-4
| 240
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase_ = logging.get_logger(__name__)  # module-level logger for this feature extractor
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Feature extractor that turns HTML strings into node strings and xpaths.

    Uses BeautifulSoup (bs4) to walk the document, collecting each text node
    together with the xpath of its enclosing tag.

    NOTE(review): the obfuscated original named three different methods
    `UpperCAmelCase__` (so only the last survived) while `__call__` and
    `get_three_from_single` invoke them by their real names; those method names
    are restored here, and the collapsed local variables are given distinct
    names.  Control flow and every runtime string are unchanged.
    """

    def __init__( self, **kwargs ):
        # bs4 is an optional dependency; fail early with a clear message if absent.
        requires_backends(self, ['''bs4'''] )
        super().__init__(**kwargs )

    def xpath_soup( self, element ):
        """Return (tag names, sibling subscripts) along the path root→element.

        Subscript 0 means the tag is the only child of that name; otherwise the
        1-based position among same-named siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings, 1 ) if s is child ) )
            child = parent
        # Paths were collected leaf→root; flip to root→leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single( self, html_string ):
        """Parse one HTML string into (doc strings, per-string tag paths, per-string subscripts)."""
        html_code = BeautifulSoup(html_string, '''html.parser''' )

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag )

                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )

        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath( self, xpath_tags, xpath_subscripts ):
        """Render an xpath string like ``/html/body/div[2]`` from tags + subscripts."""
        xpath = ''''''
        for tagname, subs in zip(xpath_tags, xpath_subscripts ):
            xpath += F'''/{tagname}'''
            if subs != 0:
                xpath += F'''[{subs}]'''
        return xpath

    def __call__( self, html_strings ):
        """Extract nodes and xpaths for one HTML string or a batch of them.

        Returns a BatchFeature with keys ``nodes`` and ``xpaths``.
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str ):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple) ):
            # NOTE(review): a non-empty batch is probed via its first element only,
            # and an empty list is accepted here but would fail below — preserved
            # from the original behavior.
            if len(html_strings ) == 0 or isinstance(html_strings[0], str ):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F'''but is of type {type(html_strings )}.''' )

        is_batched = bool(isinstance(html_strings, (list, tuple) ) and (isinstance(html_strings[0], str )) )

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list, sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None )

        return encoded_inputs
| 74
| 0
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    # Test suite for the GPT-2 BPE tokenizer (slow + fast implementations).
    # NOTE(review): obfuscation collapsed every local variable into `lowercase_`
    # and dropped the `self.*` fixture assignments (e.g. `self.vocab_file` is read
    # in setUp but never written), so these tests cannot run as-is — the original
    # fixture wiring must be restored before use.
    UpperCamelCase__ = GPTaTokenizer
    UpperCamelCase__ = GPTaTokenizerFast
    UpperCamelCase__ = True
    UpperCamelCase__ = {'''add_prefix_space''': True}
    UpperCamelCase__ = False
    # setUp: writes a tiny BPE vocab + merges file used by the tokenizer fixtures.
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase_ : Optional[Any] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
            """<|endoftext|>""",
        ]
        lowercase_ : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        lowercase_ : Tuple = {"""unk_token""": """<unk>"""}
        lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowercase_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowercase_ ) )
    # Factory for a slow tokenizer built from the fixture files.
    def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : int ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
    # Factory for a fast (Rust-backed) tokenizer built from the fixture files.
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : List[Any] ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
    # Provides an (input, expected output) text pair for the common test mixin.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Any ):
        lowercase_ : Dict = """lower newer"""
        lowercase_ : int = """lower newer"""
        return input_text, output_text
    # Checks tokenization and token→id conversion of the slow tokenizer.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase_ : Union[str, Any] = """lower newer"""
        lowercase_ : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        lowercase_ : Optional[Any] = tokens + [tokenizer.unk_token]
        lowercase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
    # Checks slow vs fast tokenizer parity on tokenization and encoding.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        if not self.test_rust_tokenizer:
            return
        lowercase_ : Tuple = self.get_tokenizer()
        lowercase_ : List[str] = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
        lowercase_ : List[str] = """lower newer"""
        # Testing tokenization
        lowercase_ : Dict = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : int = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        # Testing conversion to ids without special tokens
        lowercase_ : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        # Testing conversion to ids with special tokens
        lowercase_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
        lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : Dict = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        # Testing the unknown token
        lowercase_ : str = tokens + [rust_tokenizer.unk_token]
        lowercase_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[int] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    # Verifies encode/encode_plus/batch_encode_plus raise when padding without a pad token.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[Any]=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                # Simple input
                lowercase_ : Dict = """This is a simple input"""
                lowercase_ : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
                lowercase_ : Tuple = ("""This is a simple input""", """This is a pair""")
                lowercase_ : Any = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
    # Exercises max_length and dynamic padding for single and pair inputs with an explicit pad token.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
        # Simple input
        lowercase_ : str = """This is a simple input"""
        lowercase_ : str = ["""This is a simple input looooooooong""", """This is a simple input"""]
        lowercase_ : Optional[int] = ("""This is a simple input""", """This is a pair""")
        lowercase_ : Dict = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]
        lowercase_ : int = tokenizer.pad_token_id
        lowercase_ : Any = tokenizer(lowercase_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        lowercase_ : int = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )
        lowercase_ : str = tokenizer(*lowercase_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        lowercase_ : Optional[Any] = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["""input_ids"""] )
        self.assertTrue(0 in out_s["""attention_mask"""] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
        self.assertFalse(0 in out_sa["""attention_mask"""][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
        self.assertTrue(0 in out_sa["""attention_mask"""][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["""input_ids"""] )
        self.assertTrue(0 in out_p["""attention_mask"""] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
        self.assertFalse(0 in out_pa["""attention_mask"""][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
        self.assertTrue(0 in out_pa["""attention_mask"""][1] )
    # Verifies a custom bos token is prepended on encode and survives decode.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : Tuple = """$$$"""
        lowercase_ : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase_ , add_bos_token=lowercase_ )
        lowercase_ : List[Any] = """This is a simple input"""
        lowercase_ : str = ["""This is a simple input 1""", """This is a simple input 2"""]
        lowercase_ : int = tokenizer.bos_token_id
        lowercase_ : Dict = tokenizer(lowercase_ )
        lowercase_ : Tuple = tokenizer(lowercase_ )
        self.assertEqual(out_s.input_ids[0] , lowercase_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowercase_ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase_ : Dict = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , lowercase_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        pass
    # Verifies the special-tokens mask lines up with the encoded sequence.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase_ : Tuple = [self.get_tokenizer(do_lower_case=lowercase_ , add_bos_token=lowercase_ )]
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowercase_ : int = """Encode this."""
                lowercase_ : Tuple = """This one too please."""
                lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
                encoded_sequence += tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase_ : List[Any] = tokenizer.encode_plus(
                    lowercase_ , lowercase_ , add_special_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , )
                lowercase_ : int = encoded_sequence_dict["""input_ids"""]
                lowercase_ : List[str] = encoded_sequence_dict["""special_tokens_mask"""]
                self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
                lowercase_ : int = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase_ )
                ]
                lowercase_ : List[str] = [x for x in filtered_sequence if x is not None]
                self.assertEqual(lowercase_ , lowercase_ )
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    # Regression tests for the OPT tokenizer (slow↔fast conversion and bos handling).
    # NOTE(review): this class shares its obfuscated name with the GPT-2 test class
    # above, so at module level it shadows that class; distinct names need restoring
    # upstream.  These tests also download facebook/opt-350m from the hub.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase_ : str = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
        lowercase_ : Optional[int] = """A photo of a cat"""
        lowercase_ : Any = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""test_opt""" )
        lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""./test_opt""" )
        lowercase_ : List[Any] = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )
    # Same check using the slow tokenizer implementation.
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowercase_ )
        lowercase_ : List[Any] = """A photo of a cat"""
        lowercase_ : List[Any] = tokenizer.encode(
            lowercase_ , )
        # Same as above
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )
    # Verifies a remapped bos token survives a save/reload round trip.
    @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : Any = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
        lowercase_ : Union[str, Any] = """bos"""
        lowercase_ : int = tokenizer.get_vocab()["""bos"""]
        lowercase_ : Union[str, Any] = """A photo of a cat"""
        lowercase_ : Dict = tokenizer.encode(
            lowercase_ , )
        # We changed the bos token
        self.assertEqual(lowercase_ , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""./tok""" )
        lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""./tok""" )
        self.assertTrue(tokenizer.is_fast )
        lowercase_ : Tuple = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [31957, 250, 1345, 9, 10, 4758] )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the focalnet subpackage: heavy modules are only
# imported when their names are actually accessed.
#
# NOTE(review): the obfuscated original assigned the structure dict to
# `_lowercase` but consumed it as `_import_structure`, and never inserted the
# modeling list into the dict; both wirings are restored below.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BarthezTokenizer / BarthezTokenizerFast.

    The previous revision inherited from the undefined `_SCREAMING_SNAKE_CASE`,
    bound all four mixin attributes and all seven test methods to single reused
    names (so only the last of each survived), and referenced undefined locals.
    """

    # These attribute names are the TokenizerTesterMixin contract.
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back in the mbarthez vocab."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 188
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flattened Flax parameter to PyTorch conventions.

    Returns the (possibly renamed) key tuple and the (possibly transposed) tensor.
    Renamed from the duplicated `UpperCAmelCase_` to the name the call site in
    this file (`rename_base_flax_keys`) actually uses; the old signature reused
    one parameter name twice, which is a SyntaxError.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: transpose the 2-D kernel
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split one flattened checkpoint key into its real layer name, sub-key and content.

    Renamed from the duplicated `UpperCAmelCase_` to the name the call site in this
    file uses; the old signature reused one parameter name three times (SyntaxError).

    Returns:
        (curr_real_layer_name, split_layer, content) where `split_layer` is a tuple
        (plain key) or a one-element list of tuples (metadata / kvstore keys).
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # kvstore paths are rewritten to live under the checkpoint directory
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Apply HF key renames to one block of weights and persist it with torch.save.

    Renamed from the duplicated `UpperCAmelCase_` to the name the call sites in this
    file use; the old signature reused one parameter name twice (SyntaxError) and
    iterated an undefined `current_block`.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # flattened flax keys use "/" separators; PyTorch state dicts use "."
        new_current_block[k.replace("/", ".")] = v
    torch.save(new_current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream a T5X Switch checkpoint from tensorstore into size-bounded PyTorch shards.

    Renamed from the duplicated `UpperCAmelCase_` to the name the __main__ block uses;
    the old signature reused one parameter name five times (SyntaxError) and every
    local had lost its binding. Reconstructed 1:1 from the surviving statements.

    Returns:
        (metadata, index): the `{"total_size": ...}` metadata and the weight-map index
        (index is None when everything fits in a single shard).
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    # Group the flattened keys back into per-layer tensorstore specs.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        renamed_key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(renamed_key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        # argparse stores "--switch_t5x_checkpoint_path" as `switch_t5x_checkpoint_path`;
        # the previous `args.switch_tax_checkpoint_path` attribute never existed.
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    """Smoke test: load a converted switch-base-8 checkpoint and generate from it.

    Renamed from `UpperCAmelCase_`, which was shared by four module functions
    (each definition silently shadowing the previous one); nothing calls it by
    that name.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = TaTokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 188
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
# Metric docstrings. The previous revision bound all three strings to the same
# name `UpperCamelCase_`, so the `_CITATION` / `_DESCRIPTION` / `_KWARGS_DESCRIPTION`
# names referenced by the metric class below were NameErrors.
_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        78.22
        >>> print(round(results[\"perplexities\"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = datasets.load_dataset(\"wikitext\",
        ...                                     \"wikitext-2-raw-v1\",
        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        60.35
        >>> print(round(results[\"perplexities\"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):
    """Perplexity metric: exponentiated average negative log-likelihood under a causal LM.

    Fixes over the previous revision: both methods shared one name (shadowing
    `_info`, which `datasets.Metric` looks up by name), `_compute` reused a single
    parameter name five times (SyntaxError), and the body referenced nonexistent
    `torch.expa` / `torch.intaa` plus a dozen unbound locals.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Return per-text perplexities and their mean for `input_texts` under `model_id`."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    # int64 matches the attention-mask dtype produced by the tokenizer
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # perplexity = exp(mean NLL over non-padding positions)
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 497
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _lowercase ( lowerCAmelCase ):
    """Diffusion pipeline that denoises mel-spectrogram images and converts them to audio.

    NOTE(review): throughout this class every parameter is named `a` — duplicate
    parameter names are a SyntaxError in Python, and most assignment targets have
    been replaced by the junk name `__snake_case`, leaving later references
    (steps, noise, mask, images, ...) unbound. The statements below preserve the
    original pipeline's structure; the real names need to be restored.
    """

    # Presumably the list of optional components (the vqvae may be None) — TODO confirm.
    _a : Optional[Any] = ['''vqvae''']
    def __init__( self : Optional[Any] , a : AutoencoderKL , a : UNetaDConditionModel , a : Mel , a : Union[DDIMScheduler, DDPMScheduler] , ):
        """Register the UNet, scheduler, mel converter and (optional) VQ-VAE as modules."""
        super().__init__()
        self.register_modules(unet=a , scheduler=a , mel=a , vqvae=a )
    def _UpperCamelCase ( self : Any ):
        """Return the default number of inference steps (50 for DDIM-style, else 1000).

        NOTE(review): `a` is unbound here; the isinstance check presumably targeted
        DDIMScheduler — TODO confirm.
        """
        return 5_0 if isinstance(self.scheduler , a ) else 1_0_0_0
    @torch.no_grad()
    def __call__( self : Optional[int] , a : int = 1 , a : str = None , a : np.ndarray = None , a : int = 0 , a : int = 0 , a : int = None , a : torch.Generator = None , a : float = 0 , a : float = 0 , a : torch.Generator = None , a : float = 0 , a : torch.Tensor = None , a : torch.Tensor = None , a : Optional[int]=True , ):
        """Run the denoising loop; returns images plus decoded audio (or a raw tuple)."""
        # Fall back to the scheduler-dependent default step count.
        __snake_case : List[Any] =steps or self.get_default_steps()
        self.scheduler.set_timesteps(a )
        __snake_case : int =step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __snake_case : Any =(self.unet.config.sample_size, self.unet.config.sample_size)
        # Start from fresh Gaussian noise unless the caller supplied some.
        if noise is None:
            __snake_case : List[str] =randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=a , device=self.device , )
        __snake_case : int =noise
        __snake_case : List[str] =None
        # Optional conditioning on an input audio slice (file or raw array).
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(a , a )
            __snake_case : List[Any] =self.mel.audio_slice_to_image(a )
            __snake_case : Tuple =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            # Rescale uint8 pixels to [-1, 1].
            __snake_case : str =(input_image / 2_5_5) * 2 - 1
            __snake_case : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                __snake_case : Tuple =self.vqvae.encode(torch.unsqueeze(a , 0 ) ).latent_dist.sample(
                    generator=a )[0]
                __snake_case : Optional[int] =self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                __snake_case : str =self.scheduler.add_noise(a , a , self.scheduler.timesteps[start_step - 1] )
            # Convert the mask boundaries from seconds to spectrogram pixels.
            __snake_case : Optional[Any] =(
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __snake_case : int =int(mask_start_secs * pixels_per_second )
            __snake_case : Any =int(mask_end_secs * pixels_per_second )
            __snake_case : Union[str, Any] =self.scheduler.add_noise(a , a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            # Conditional UNets take an extra encoding input.
            if isinstance(self.unet , a ):
                __snake_case : List[str] =self.unet(a , a , a )['''sample''']
            else:
                __snake_case : Union[str, Any] =self.unet(a , a )['''sample''']
            # DDIM steps take an eta parameter; DDPM steps do not.
            if isinstance(self.scheduler , a ):
                __snake_case : List[str] =self.scheduler.step(
                    model_output=a , timestep=a , sample=a , eta=a , generator=a , )['''prev_sample''']
            else:
                __snake_case : List[Any] =self.scheduler.step(
                    model_output=a , timestep=a , sample=a , generator=a , )['''prev_sample''']
            # Re-impose the (noised) input over the masked edges each step.
            # NOTE(review): the original presumably assigned into slices of `images`
            # here; the assignment targets were lost — TODO restore.
            if mask is not None:
                if mask_start > 0:
                    __snake_case : Any =mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __snake_case : Optional[Any] =mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __snake_case : str =1 / self.vqvae.config.scaling_factor * images
            __snake_case : Optional[int] =self.vqvae.decode(a )['''sample''']
        # Map [-1, 1] tensors back to uint8 HWC images.
        __snake_case : int =(images / 2 + 0.5).clamp(0 , 1 )
        __snake_case : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __snake_case : Optional[int] =(images * 2_5_5).round().astype('''uint8''' )
        __snake_case : Tuple =list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(a , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        __snake_case : Union[str, Any] =[self.mel.image_to_audio(a ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(a )[:, np.newaxis, :] ) , **ImagePipelineOutput(a ) )
    @torch.no_grad()
    def _UpperCamelCase ( self : int , a : List[Image.Image] , a : int = 5_0 ):
        """Reverse-DDIM encode: map spectrogram images back to the noise that generates them."""
        assert isinstance(self.scheduler , a )
        self.scheduler.set_timesteps(a )
        __snake_case : Dict =np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        __snake_case : Any =(sample / 2_5_5) * 2 - 1
        __snake_case : Any =torch.Tensor(a ).to(self.device )
        # Walk the timesteps in reverse, inverting each DDIM update.
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __snake_case : Union[str, Any] =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __snake_case : Dict =self.scheduler.alphas_cumprod[t]
            __snake_case : str =(
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __snake_case : int =1 - alpha_prod_t
            __snake_case : Tuple =self.unet(a , a )['''sample''']
            __snake_case : str =(1 - alpha_prod_t_prev) ** 0.5 * model_output
            __snake_case : Dict =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __snake_case : Optional[int] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def _UpperCamelCase ( a : torch.Tensor , a : torch.Tensor , a : float ):
        """Spherical linear interpolation between two flattened tensors by fraction alpha."""
        __snake_case : List[Any] =acos(torch.dot(torch.flatten(a ) , torch.flatten(a ) ) / torch.norm(a ) / torch.norm(a ) )
        return sin((1 - alpha) * theta ) * xa / sin(a ) + sin(alpha * theta ) * xa / sin(a )
| 497
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
def decorator(__snake_case ):
lowerCamelCase__ = getattr(__snake_case ,'''handle_key''' ,[] )
handle += [key]
setattr(__snake_case ,'''handle_key''' ,__snake_case )
return func
return decorator
def lowerCAmelCase__(*__snake_case ) -> Tuple:
'''simple docstring'''
def decorator(__snake_case ):
lowerCamelCase__ = getattr(__snake_case ,'''handle_key''' ,[] )
handle += keys
setattr(__snake_case ,'''handle_key''' ,__snake_case )
return func
return decorator
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __new__( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = super().__new__(cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not hasattr(__lowerCAmelCase , '''key_handler''' ):
setattr(__lowerCAmelCase , '''key_handler''' , {} )
setattr(__lowerCAmelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowerCamelCase__ = getattr(__lowerCAmelCase , '''handle_key''' , [] )
for key in handled_keys:
lowerCamelCase__ = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
lowerCamelCase__ = get_character()
if char != KEYMAP["undefined"]:
lowerCamelCase__ = ord(__lowerCAmelCase )
lowerCamelCase__ = cls.key_handler.get(__lowerCAmelCase )
if handler:
lowerCamelCase__ = char
return handler(cls )
else:
return None
def lowerCAmelCase__(cls) -> List[Any]:
    """Re-create ``cls`` through the ``__A`` metaclass so its key handlers register.

    The previous revision called the undefined name ``KeyHandler``; the metaclass
    defined in this module is ``__A``.
    """
    return __A(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 481
|
from __future__ import annotations
# Adjacency-list representation of the demo graph consumed by the __main__ block below.
_a = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = graph
# mapping node to its parent in resulting breadth first tree
lowerCamelCase__ = {}
lowerCamelCase__ = source_vertex
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.source_vertex}
lowerCamelCase__ = None
lowerCamelCase__ = [self.source_vertex] # first in first out queue
while queue:
lowerCamelCase__ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__lowerCAmelCase )
lowerCamelCase__ = vertex
queue.append(__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
lowerCamelCase__ = self.parent.get(__lowerCAmelCase )
if target_vertex_parent is None:
lowerCamelCase__ = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(__lowerCAmelCase )
return self.shortest_path(__lowerCAmelCase ) + F'->{target_vertex}'
if __name__ == "__main__":
    # The class and the module-level graph are named __A and _a in this file;
    # the previous revision referenced the undefined `Graph` and `graph`.
    g = __A(_a, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # unreachable vertex: demonstrates the ValueError
| 481
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# The previous revision assigned all five of these values to the single name
# `__a`, so every earlier constant was lost and later references (e.g. ORG_NAME
# in the en-de test, FRAMEWORK for return_tensors) were NameErrors.
# Shared SentencePiece fixture used to build the test vocab files.
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
# Minimal tokenizer config written next to the fixture vocab in setUp().
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
# Language-code token used by multilingual Marian checkpoints.
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Tensor framework for `return_tensors=...`, depending on what is installed.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class __lowercase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MarianTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
def lowerCAmelCase_ ( self : List[Any] ):
    """Build a tiny Marian fixture tokenizer directory under self.tmpdirname.

    NOTE(review): the assignment targets below were replaced with the junk
    name `__A`, leaving `save_dir`, `tokenizer` and the save_json/copyfile
    arguments (`UpperCamelCase_`) unbound — the original presumably bound the
    vocab list, the zipped vocab dict and Path(self.tmpdirname); needs restoration.
    """
    super().setUp()
    # Nine-entry vocabulary for the fixture tokenizer.
    __A = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
    __A = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
    __A = Path(self.tmpdirname )
    save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
    save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
    # Copy the shared SentencePiece model in as both source and target spm.
    if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
        copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
        copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
    __A = MarianTokenizer.from_pretrained(self.tmpdirname )
    tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[int] , **UpperCamelCase_ : Optional[Any] ):
    """Instantiate a MarianTokenizer from the fixture directory, forwarding any kwargs."""
    fixture_dir = self.tmpdirname
    return MarianTokenizer.from_pretrained(fixture_dir, **UpperCamelCase_)
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Tuple ):
    """Return an (input, expected output) text pair; Marian round-trips it unchanged."""
    sample_text = "This is a test"
    return sample_text, sample_text
def lowerCAmelCase_(self):
    """`</s>` maps to id 0 and back (it is the first entry in the fixture vocab).

    The previous revision passed the undefined name `UpperCamelCase_` to both
    conversion calls instead of the token/id locals.
    """
    token = "</s>"
    token_id = 0
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCAmelCase_(self):
    """The fixture vocab starts </s>, <unk>, ends <pad>, and has nine entries.

    The previous revision never bound `vocab_keys` and measured the length of
    the undefined `UpperCamelCase_`.
    """
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0], "</s>")
    self.assertEqual(vocab_keys[1], "<unk>")
    self.assertEqual(vocab_keys[-1], "<pad>")
    self.assertEqual(len(vocab_keys), 9)
def lowerCAmelCase_ ( self : List[Any] ):
    """The fixture vocabulary defines exactly nine tokens."""
    expected_vocab_size = 9
    self.assertEqual(self.get_tokenizer().vocab_size, expected_vocab_size)
def lowerCAmelCase_(self):
    """Encoding with the real en-de checkpoint matches known ids and survives save/reload.

    The previous revision bound every intermediate to the junk name `__A`, leaving
    `en_de_tokenizer`, `batch` and the saved-directory listing undefined.
    """
    en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
    batch = en_de_tokenizer(["I am a small frog"], return_tensors=FRAMEWORK)
    self.assertIsInstance(batch, BatchEncoding)
    expected = [38, 121, 14, 697, 38_848, 0]
    self.assertListEqual(expected, batch.input_ids[0])
    save_dir = tempfile.mkdtemp()
    en_de_tokenizer.save_pretrained(save_dir)
    contents = [x.name for x in Path(save_dir).glob("*")]
    self.assertIn("source.spm", contents)
    MarianTokenizer.from_pretrained(save_dir)
def lowerCAmelCase_(self):
    """A pathologically long input is truncated to the model max length (512).

    The previous revision never bound `tok`/`batch` (junk `__A` targets).
    """
    tok = self.get_tokenizer()
    batch = tok(
        ["I am a small frog" * 1_000, "I am a small frog"],
        padding=True,
        truncation=True,
        return_tensors=FRAMEWORK,
    )
    self.assertIsInstance(batch, BatchEncoding)
    self.assertEqual(batch.input_ids.shape, (2, 512))
def lowerCAmelCase_(self):
    """Short inputs are padded only to the longest sequence in the batch (10 tokens).

    The previous revision never bound `tok`/`batch_smaller` (junk `__A` targets).
    """
    tok = self.get_tokenizer()
    batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
    self.assertIsInstance(batch_smaller, BatchEncoding)
    self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def lowerCAmelCase_ ( self : str ):
    """Integration test of tokenization against a pinned opus-mt-en-de revision.

    Fix: the helper was called with ``expected_encoding=UpperCamelCase_`` — an
    undefined name — instead of the dict built in this method.  The expected
    ids are three sequences padded with id 58_100 to a common length of 102;
    the padding runs are written as repetitions, value-identical to the
    original inline literal.
    """
    pad = 58_100
    seq_a = [43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0]
    seq_b = [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0]
    seq_c = [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0]
    expected_encoding = {
        "input_ids": [seq_a, seq_b + [pad] * 71, seq_c + [pad] * 90],
        "attention_mask": [[1] * 102, [1] * 31 + [0] * 71, [1] * 12 + [0] * 90],
    }
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding,
        model_name="""Helsinki-NLP/opus-mt-en-de""",
        revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""",
        decode_kwargs={"""use_source_tokenizer""": True},
    )
def lowerCAmelCase_ ( self : Tuple ):
    """Checkpoint with separate source/target vocabularies: encode both sides
    and round-trip the target through decode.

    Fix: undefined ``UpperCamelCase_`` placeholders replaced with the locals
    assigned in this method.
    """
    tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""")
    source_text = """Tämä on testi"""
    target_text = """This is a test"""
    expected_src_ids = [76, 7, 2_047, 2]
    expected_target_ids = [69, 12, 11, 940, 2]

    src_ids = tokenizer(source_text).input_ids
    self.assertListEqual(src_ids, expected_src_ids)

    target_ids = tokenizer(text_target=target_text).input_ids
    self.assertListEqual(target_ids, expected_target_ids)

    decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
    self.assertEqual(decoded, target_text)
| 700
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _SCREAMING_SNAKE_CASE ( func: str , start: float | Decimal , precision: float = 10**-10 ) -> float:
    """Find a root of ``func`` — an expression in the variable ``x`` — via
    Newton-Raphson iteration starting from ``start``.

    Fixes two defects: the original signature reused one placeholder name for
    every parameter (invalid Python), and the iterate was stored in a local
    not named ``x``, so the ``eval`` of the expression could never see it.
    The symbolic derivative is also hoisted out of the loop.

    NOTE(review): ``eval`` executes arbitrary code — only trusted expressions
    may be passed (hence the ``noqa: S307`` markers).
    """
    x = Decimal(start)  # eval() below reads this local as the variable ``x``
    derivative = str(diff(func))  # symbolic derivative, evaluated per step
    while True:
        x = x - Decimal(eval(func)) / Decimal(eval(derivative))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # Fix: the calls referenced ``newton_raphson``, which is not defined in
    # this module; the solver above is named ``_SCREAMING_SNAKE_CASE``.
    # Find root of trigonometric function: sin(x) = 0 near 2 converges to pi
    print(f"""The root of sin(x) = 0 is {_SCREAMING_SNAKE_CASE('sin(x)', 2)}""")
    # Find root of polynomial
    print(f"""The root of x**2 - 5*x + 2 = 0 is {_SCREAMING_SNAKE_CASE('x**2 - 5*x + 2', 0.4)}""")
    # Find root of log(x) = 1, i.e. Euler's number e
    print(f"""The root of log(x) - 1 = 0 is {_SCREAMING_SNAKE_CASE('log(x) - 1', 2)}""")
    # Exponential root: exp(x) = 1 at x = 0
    print(f"""The root of exp(x) - 1 = 0 is {_SCREAMING_SNAKE_CASE('exp(x) - 1', 0)}""")
| 199
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __a ( unittest.TestCase ):
    """PyTorch <-> TensorFlow cross-loading checks for the auto-model factories.

    Each test loads a config, loads the TF model from a PyTorch checkpoint
    (``from_pt=True``), then the PyTorch model from a TF checkpoint
    (``from_tf=True``), and asserts the concrete classes resolved.  Expected
    classes are restored from this module's import list; the small/dummy
    identifiers come from ``transformers.testing_utils``.

    NOTE(review): every test method shares the name ``lowerCamelCase_``, so
    later definitions shadow earlier ones and unittest discovers none of them
    (names don't start with ``test_``).  The names are kept to avoid changing
    the class interface; only the bodies are fixed, replacing the undefined
    ``lowerCAmelCase__`` placeholder references.
    """

    @slow
    def lowerCamelCase_ ( self ):
        """TFAutoModel / AutoModel resolve BERT checkpoints across frameworks."""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def lowerCamelCase_ ( self ):
        """Pre-training heads resolve to the BERT pre-training classes."""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def lowerCamelCase_ ( self ):
        """Causal LM (GPT-2), with and without loading info."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def lowerCamelCase_ ( self ):
        """Legacy LM-head auto class resolves BERT to the masked-LM classes."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def lowerCamelCase_ ( self ):
        """Masked LM, with and without loading info."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def lowerCamelCase_ ( self ):
        """Seq2seq LM (T5), with and without loading info."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def lowerCamelCase_ ( self ):
        """Sequence classification heads."""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def lowerCamelCase_ ( self ):
        """Question-answering heads."""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def lowerCamelCase_ ( self ):
        """Tiny local checkpoint: parameter counts match across frameworks.

        Fix: the checkpoint argument was the undefined ``lowerCAmelCase__``;
        the module imports SMALL_MODEL_IDENTIFIER for exactly this purpose.
        """
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def lowerCamelCase_ ( self ):
        """Unknown identifier resolves by model type (RoBERTa) — explains the
        otherwise-unused TFRobertaForMaskedLM / RobertaForMaskedLM imports."""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
| 552
|
from collections.abc import Sequence
def UpperCamelCase ( snake_case__ = None):
    """Return the maximum contiguous-subsequence (subarray) sum of the input.

    Raises ValueError for ``None`` or an empty sequence.

    Fix: the previous loop updated one running value with
    ``max(best, best + num, num)``, conflating "best so far" with "best sum
    ending here" — e.g. [2, -3, 4] returned 6 instead of 4.  This is the
    standard two-accumulator Kadane algorithm.
    """
    nums = snake_case__
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    best = current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)  # best sum of a run ending at num
        best = max(best, current)
    return best
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user.
    # Fix: both inputs previously rebound the same obfuscated name, and the
    # final call referenced the undefined ``max_subsequence_sum``/``array``;
    # the solver defined above is ``UpperCamelCase``.
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(UpperCamelCase(array))
| 659
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowercase = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCamelCase(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model-input dict, deriving any masks not supplied.

    Attention masks are 1 wherever the ids differ from ``config.pad_token_id``.

    Fixes: the original signature reused a single placeholder name for every
    parameter (invalid Python), and the returned dict mapped
    ``"decoder_attention_mask"`` to the *encoder* attention mask while the
    decoder mask computed above went unused.

    NOTE(review): the three head masks are computed when defaulted but never
    returned — kept as-is to preserve the call signature; confirm whether the
    original returned them.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    """Builds a tiny BlenderbotSmall config/inputs and verifies that cached
    (incremental) decoding matches full decoding.

    NOTE(review): the class and method names are restored from their call
    sites in the test class below (``FlaxBlenderbotSmallModelTester(self)``,
    ``prepare_config_and_inputs``, ``check_use_cache_forward``, ...); the
    obfuscated definitions all shared a single name, so only the last one
    survived and the call sites resolved nothing.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        # Parameter names reconstructed from the attribute assignments; the
        # original signature reused one placeholder name for all of them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random input ids (clipped >= 3, eos-terminated) plus config and inputs dict."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # Append eos (id 2) to every row.
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # NOTE(review): original value lost in obfuscation; False matches sibling flax testers — confirm
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """init_cache + incremental last-token decode must match full decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above but with an explicit, cache-length-padded decoder mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Extend the decoder mask with zeros up to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    """Standalone forward-pass and ``shift_tokens_right`` checks for
    BlenderbotSmall heads.

    NOTE(review): ``vocab_size`` and ``_get_config_and_data`` are restored
    from their call sites inside this class; the remaining test methods still
    share one obfuscated name (only the last definition survives) — they
    likely carried distinct ``test_*`` names originally.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """A fixed batch of eos-terminated input ids plus a tiny config."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def UpperCamelCase__ ( self):
        """LM head produces logits of shape (batch, seq, vocab)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape, expected_shape)

    def UpperCamelCase__ ( self):
        """Uneven encoder/decoder lengths still yield decoder-shaped logits."""
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape, expected_shape)

    def UpperCamelCase__ ( self):
        """shift_tokens_right keeps shape, drops one pad, puts start token first."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class lowerCamelCase_ ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """Model-level tests (cache forward, jit encode/decode, pretrained load).

    Fixes: the base classes were the undefined ``UpperCAmelCase_``
    placeholders — ``FlaxModelTesterMixin`` and ``FlaxGenerationTesterMixin``
    are restored from this module's imports.  ``all_model_classes`` is
    restored from the ``self.all_model_classes`` reads below; ``setUp`` is
    restored because every method depends on ``self.model_tester``.

    NOTE(review): ``is_encoder_decoder`` / ``all_generative_model_classes``
    follow the sibling flax test files — confirm; the remaining methods still
    share one obfuscated name (only the last definition survives).
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        # NOTE(review): FlaxBlenderbotSmallModelTester is the tester class
        # defined earlier in this module (its definition lost that name in
        # obfuscation) — confirm the binding.
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def UpperCamelCase__ ( self):
        """Cached decoding equals full decoding for every model class."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def UpperCamelCase__ ( self):
        """Cached decoding with an explicit decoder attention mask."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def UpperCamelCase__ ( self):
        """model.encode produces identical shapes with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def UpperCamelCase__ ( self):
        """model.decode produces identical shapes with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''])

                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def UpperCamelCase__ ( self):
        """Pretrained checkpoint loads and runs a single-token forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 707
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__lowercase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    """Builder config for the audio folder dataset builder.

    Fixes: both fields were obfuscated to the single name ``a__`` (the second
    shadowing the first), and the builder class below references
    ``AudioFolderConfig``, which was never defined — the class name is
    restored from that reference.  Field names follow the upstream datasets
    AudioFolderConfig (``drop_labels`` / ``drop_metadata``), which the
    folder-based builder reads.

    NOTE(review): ``bool = None`` defaults are kept as in the original even
    though ``Optional[bool]`` would be the accurate annotation.
    """

    drop_labels: bool = None
    drop_metadata: bool = None
class lowerCamelCase_ ( folder_based_builder.FolderBasedBuilder ):
    """Folder-based dataset builder for audio files.

    Fixes: every class attribute was obfuscated to ``a__``, so only the last
    binding survived and the FolderBasedBuilder base had none of the
    attributes it reads.  Names are restored from the upstream datasets
    AudioFolder builder — confirm against the original.
    """

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = """audio"""
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="""audio""", label_column="""label""")
# Audio file extensions accepted by the folder-based audio builder.
AUDIO_EXTENSIONS = [
    '''.aiff''',
    '''.au''',
    '''.avr''',
    '''.caf''',
    '''.flac''',
    '''.htk''',
    '''.svx''',
    '''.mat4''',
    '''.mat5''',
    '''.mpc2k''',
    '''.ogg''',
    '''.paf''',
    '''.pvf''',
    '''.raw''',
    '''.rf64''',
    '''.sd2''',
    '''.sds''',
    '''.ircam''',
    '''.voc''',
    '''.w64''',
    '''.wav''',
    '''.nist''',
    '''.wavex''',
    '''.wve''',
    '''.xi''',
    '''.mp3''',
    '''.opus''',
]
# Fix: the original rebind referenced ``AUDIO_EXTENSIONS`` before it existed
# (NameError at import time); the list is now defined under that name and the
# obfuscated module-level alias is kept for any stale references.
# NOTE(review): upstream this line assigns the builder's ``EXTENSIONS``
# attribute — confirm.
__lowercase = AUDIO_EXTENSIONS
| 452
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def snake_case ( image ) -> "torch.Tensor":
    """Convert a PIL image to a normalized NCHW float tensor in [-1, 1].

    The image is resized down to the nearest integer multiple of 32 on each
    side, scaled to [0, 1], transposed from HWC to NCHW with a batch axis,
    and finally mapped to [-1, 1].

    Fixes: the two tuple unpackings bound both width and height to a single
    placeholder name (losing one value), and the ``:Dict -> Tuple`` hints were
    wrong for an image-to-tensor conversion.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""])
    arr = np.array(image).astype(np.float32) / 255.0
    arr = arr[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch axis
    tensor = torch.from_numpy(arr)
    return 2.0 * tensor - 1.0
class a(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + unconditional U-Net).

    NOTE(review): the obfuscated original subclassed the undefined name
    ``__lowerCAmelCase`` and declared duplicate parameter names (a SyntaxError).
    The base class and parameter names are restored from the imports above and
    the `register_modules` keywords.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Run super-resolution on `image` and return the upscaled image(s)."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            # `snake_case` is the module-level preprocess helper defined above.
            image = snake_case(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 401
|
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below `n` (n must be > 2) via an odd-only sieve.

    Restored from the obfuscated original, whose sieve array and flags were all
    bound to ``_A`` and whose caller below reads the name ``prime_sieve``.
    Even indices other than 2 are never cleared, but they are also never
    collected — only odd candidates are scanned.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        # Start at 2*i and step by i, knocking out multiples of the odd prime i.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum, up to `limit`, of numbers divisible only by two consecutive primes.

    Restored from the obfuscated original: all locals were bound to ``_A``
    while the loop conditions read the real names, and the ``__main__`` guard
    calls ``solution()``.  The algorithm walks consecutive prime pairs
    (last_prime, next_prime), adds multiples of each inside
    (last_prime**2, next_prime**2), then removes numbers counted twice.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Prints the answer for the default limit (999_966_663_333).
    print(solution())
| 401
| 1
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursively bubble-sort `list_data` in place and return it.

    Each pass floats the largest remaining element toward index ``length - 1``;
    recursion stops as soon as a pass performs no swap.  Restored from the
    obfuscated original, which declared two parameters named ``a`` (a
    SyntaxError) while the body read ``__snake_case`` and recursed via
    ``bubble_sort``.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # No swap means the prefix is already sorted — stop recursing.
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 712
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger used throughout the training script.
snake_case__ = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio.

    Restored from the obfuscated original, whose three parameters were all
    named ``a`` (a SyntaxError) and whose caller in `train_transforms` reads
    the name ``random_subsample``.
    """
    sample_length = int(round(sample_rate * max_length))
    # Clip already short enough — return it unchanged.
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): the obfuscated original bound every field to the same
    placeholder name with ``default=_a``; field names are restored from the
    attribute accesses in ``main()`` and defaults of ``None`` from the help
    strings — confirm against the upstream example script.
    """

    dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        },)
    eval_split_name: str = field(
        default='validation', metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        },)
    audio_column_name: str = field(
        default='audio', metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''},)
    label_column_name: str = field(
        default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },)
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },)
    max_length_seconds: float = field(
        default=20, metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'},)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor we fine-tune from.

    NOTE(review): field names are restored from the attribute accesses in
    ``main()``; the boolean defaults are restored from the upstream example
    script (the obfuscated original showed only ``default=_a``) — confirm.
    """

    model_name_or_path: str = field(
        default='facebook/wav2vec2-base',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},)
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},)
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(
        default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },)
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},)

    def __post_init__(self):
        """Translate the deprecated `--freeze_feature_extractor` flag and reject contradictory settings."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def main() -> None:
    """Entry point: parse arguments, build datasets/model, then train and/or evaluate.

    NOTE(review): the obfuscated original discarded every result into ``a__``
    while subsequent lines read the real names (``parser``, ``training_args``,
    ``raw_datasets``, ...), and the ``__main__`` guard calls ``main()``.
    Names are restored from those later uses.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}.")

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}.")

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample each clip, then featurize the batch for training."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize full clips (no subsampling) for evaluation."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    # Script entry point: parse CLI/JSON args, then train and/or evaluate.
    main()
| 373
| 0
|
'''simple docstring'''
from typing import Any
class Node:
    """Singly-linked-list node holding one `data` payload.

    Restored from the obfuscated original, which assigned the payload to a
    local variable instead of `self.data` and is constructed as ``Node`` by
    ``LinkedList.push`` below.
    """

    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list supporting push-front, print, and data swap.

    Restored from the obfuscated original, in which all three methods shared
    one name (so only the last survived) and attributes were never set on
    ``self``; the ``__main__`` block below calls ``push``, ``print_list`` and
    ``swap_nodes``.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print the node payloads from head to tail on one line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data):
        """Insert a new node carrying `new_data` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding each value; no-op if either is absent."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads only — the link structure is untouched.
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # Demo: build the list 1..5, swap the payloads 1 and 4, and print both states.
    # The obfuscated original bound the list to `_lowerCamelCase` while every
    # subsequent line used `ll` — restore the intended variable name.
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 71
|
'''simple docstring'''
from math import factorial
# Precomputed factorials of the digits 0-9, keyed by the digit character.
# The obfuscated original named this `__A` while the function body read
# `DIGIT_FACTORIAL` — restore the intended name.
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of `number`.

    Raises:
        TypeError: if `number` is not an int (the obfuscated original tested
            ``isinstance(A__, A__)``, which itself raises for any int input).
        ValueError: if `number` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")

    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_00_00_00) -> int:
    """Count starting numbers below `number_limit` whose digit-factorial chain has exactly `chain_length` non-repeating terms.

    Restored from the obfuscated original, which declared two parameters with
    the same name (a SyntaxError) and whose body reads ``chain_length`` /
    ``number_limit`` and calls ``digit_factorial_sum``; the ``__main__`` guard
    calls ``solution()``.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
    # Run embedded doctests, then print the Project-Euler-style answer.
    import doctest

    doctest.testmod()
    print(F'''{solution()}''')
| 275
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): the obfuscation collapsed the distinct docstring constants of
# this module (logger, config name, checkpoints, expected output shape, label,
# archive list) onto the single name `__UpperCAmelCase`, so each assignment
# clobbers the previous one — restore distinct names when de-obfuscating.
__UpperCAmelCase :Optional[Any] = logging.get_logger(__name__)

# General docstring
__UpperCAmelCase :List[Any] = "RegNetConfig"

# Base docstring
__UpperCAmelCase :List[Any] = "facebook/regnet-y-040"
__UpperCAmelCase :Union[str, Any] = [1, 1_0_8_8, 7, 7]

# Image classification docstring
__UpperCAmelCase :int = "facebook/regnet-y-040"
__UpperCAmelCase :Optional[Any] = "tabby, tabby cat"

__UpperCAmelCase :Dict = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D -> BatchNorm -> activation block used throughout RegNet.

    NOTE(review): restored from the obfuscated original, which named every
    RegNet layer class ``a`` and declared duplicate ``snake_case`` parameters
    (a SyntaxError); the class name is taken from the references in the
    sibling layers below, parameter names from the Keras keyword arguments.
    """

    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1,
                 activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='''VALID''',
            groups=groups, use_bias=False, name='''convolution''', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='''normalization''')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem of the network: validates channel count and embeds pixel values with a strided conv."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2,
            activation=config.hidden_act, name='''embedder''', )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided convolution + batch norm used to project the residual shortcut."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='''convolution''')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='''normalization''')

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: global pooling, a bottleneck attention MLP, then channel re-weighting."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='''pooler''')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='''relu''', name='''attention.0'''),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='''sigmoid''', name='''attention.2'''),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's residual "X" block: three conv layers with a (possibly projected) shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='''shortcut''')
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''', name='''shortcut''')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='''layer.0'''),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='''layer.1'''),
            # No activation on the last conv: applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='''layer.2'''),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's residual "Y" block: an X block with an extra Squeeze-and-Excitation layer."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='''shortcut''')
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''', name='''shortcut''')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='''layer.0'''),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='''layer.1'''),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='''layer.2'''),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='''layer.3'''),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """One stage of the network: `depth` X or Y blocks, downsampling in the first."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int,
                 stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='''layers.0'''),
            *[layer(config, out_channels, out_channels, name=f'layers.{i+1}') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects each stage's hidden state."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0], name='''stages.0''', ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f'stages.{i+1}'))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False,
             return_dict: bool = True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooling; the shared backbone behind the public RegNet models."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='''embedder''')
        self.encoder = TFRegNetEncoder(config, name='''encoder''')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='''pooler''')

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor, output_hidden_states=None, return_dict=None,
             training: bool = False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class a(_a):
    """RegNet pre-trained model base: ties the config class, weight prefix and main input name.

    The obfuscated original assigned all three class attributes to the same name
    (only the last survived) and referenced the nonexistent `tf.floataa`; both
    are restored to the names/types the transformers machinery reads.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self) -> dict:
        # dummy serving signature: NCHW pixel values at the canonical 224x224 resolution
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# The obfuscated original bound both docstrings to the same name, so the first
# assignment was dead; restore the conventional distinct constant names.
REGNET_START_DOCSTRING = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.", _a, )
class a(_a):
    """Bare TF RegNet model wrapping TFRegNetMainLayer (no task head).

    The constructor's duplicate `snake_case` parameters (a SyntaxError) and the
    mangled local names in `call` are restored; decorator arguments that the
    obfuscation clobbered are left as-is with notes, since the intended constants
    are not resolvable from this block alone.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case)  # NOTE(review): obfuscation clobbered this — presumably REGNET_INPUTS_DOCSTRING; confirm
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=snake_case, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        """Forward `pixel_values` through the RegNet backbone."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _a, )
class a(_a, _a):  # NOTE(review): duplicate base — second base was clobbered by obfuscation (a loss mixin upstream); confirm
    """RegNet with a linear image-classification head on the pooled features.

    Restores distinct parameter/local names where the obfuscation produced
    duplicate parameter names (a SyntaxError in `__init__` and `call`).
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head: flatten the (1,1) pooled map, then a linear layer
        # (identity when num_labels == 0)
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case)  # NOTE(review): obfuscation clobbered this — presumably REGNET_INPUTS_DOCSTRING; confirm
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=snake_case, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        """Classify `pixel_values`; computes a loss when `labels` is given."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 720
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase :Optional[int] = TypeVar("KEY")
__UpperCAmelCase :Tuple = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """Immutable key/value bucket entry for the open-addressing hash map below.

    The obfuscated original passed the undefined name `_a` to `dataclass`,
    named the class `a` although line L19046 subclasses `_Item`, and gave both
    fields the same name; field names `key`/`val` are what the map's methods read.
    """

    key: KEY
    val: VAL
class _DeletedItem(_Item):
    """Falsy tombstone marking a deleted bucket so probe chains keep scanning past it.

    Renamed from the obfuscated `a`: the sentinel is instantiated as
    `_DeletedItem()` immediately after this class, and the original
    `super().__init__(snake_case, snake_case)` referenced undefined names —
    the tombstone carries no payload, so both fields are None.
    """

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        # falsy so `_try_set`'s `if not stored` treats tombstones as free slots
        return False
# Single shared tombstone instance; the map compares buckets against it with `is`.
# The obfuscated original bound it to an arbitrary name while the map reads `_deleted`.
_deleted = _DeletedItem()
class a(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing.

    The obfuscated original was unloadable: `__init__` declared both parameters
    under one name (a SyntaxError) and every private helper was renamed to the
    same identifier, while the bodies still call the real helper names
    (`_try_set`, `_resize`, `_iterate_buckets`, ...). Those names are restored.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        """Home bucket for `key`."""
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Next probe index, wrapping around the table."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place (key, val) at bucket `ind`.

        Succeeds when the slot is free (None or a falsy tombstone) or already
        holds the same key; returns False on a collision with a different key.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """True when the load factor reached the configured capacity factor."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """True when shrinking is worthwhile (never below the initial size)."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rebuild the table at `new_size`, re-inserting live items (tombstones are dropped)."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        """Yield the probe sequence for `key`, at most one full pass over the table."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                # hit a never-used slot: key cannot be further along the chain
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        # tombstones are falsy, so `if item` skips both None and deleted slots
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 266
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard transformers lazy-import scaffolding for the LiLT model.
# The obfuscated original rebound one name (`_SCREAMING_SNAKE_CASE`) three
# times, so `_import_structure` was undefined where it was consumed and the
# lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the modeling classes
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy so heavy imports happen on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for ShapEPipeline.

    The obfuscated original was unusable: the first base (`__lowercase`) was
    undefined although `PipelineTesterMixin` is imported above, all five class
    attributes shared one name, every method was named `a` (shadowing each
    other) while bodies reference the real names (`self.dummy_prior`,
    `self.get_dummy_components`, ...), and `get_dummy_inputs` declared two
    parameters under one name (a SyntaxError). All names are restored.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)  # deterministic tiny weights
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full tiny pipeline component dict."""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase(unittest.TestCase):
    """Slow GPU integration test for ShapEPipeline.

    The obfuscated original named both methods `a` (the test shadowed the
    teardown) and referenced the undefined `SCREAMING_SNAKE_CASE_` in the final
    assertion; locals are restored from the upstream test.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark', generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 56
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
# The obfuscated original bound both values to `_lowerCamelCase` (the first was
# lost) while the function below reads `logger` and `TOKENIZER_CLASSES`.
logger = logging.get_logger(__name__)

# slow-tokenizer name -> corresponding fast tokenizer class
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to their fast (tokenizers-backed) form.

    Restored from the obfuscated original, whose four parameters all shared one
    name (a SyntaxError) and which was defined under a name different from the
    one called at the bottom of the file.

    Args:
        tokenizer_name: tokenizer class name to convert, or None for all of them.
        checkpoint_name: single checkpoint to convert, or None for every known one.
        dump_path: directory where the fast tokenizer files are written.
        force_download: re-download checkpoints even if cached.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        # `tokenizer_class.__class__.__name__` in the original logged "type"
        # (the metaclass), not the tokenizer class name
        logger.info(F'''For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}''')
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__name__} {checkpoint}''')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # vocab files live in a checkpoint sub-directory: mirror that layout
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F'''=> File names {file_names}''')
            # keep only the unified tokenizer.json output
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F'''=> removing {file_name}''')
if __name__ == "__main__":
    # The obfuscated original bound the parser and the parsed args to
    # `_lowerCamelCase` while the code reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 705
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase_(SchedulerMixin, ConfigMixin):
    """Variance-preserving SDE scheduler (score_sde_vp style).

    The obfuscated original listed the same (undefined) base twice — both
    mixins are imported above — declared all four `__init__` parameters under
    one name (a SyntaxError), and gave both methods the same name. Names are
    restored to the upstream scheduler's interface.
    """

    # solver order of the scheduler
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # populated by set_timesteps(); config values are stored by @register_to_config
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the (descending) continuous timestep schedule on `device`."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE (Euler–Maruyama) step; returns (x, x_mean)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 112
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# The obfuscated original bound all three constants to one name (only the
# last survived) while the download function below reads DEFAULT_PROMPTS_REPO
# and PROMPT_FILES; names restored per the upstream module.
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def a__(prompt_or_repo_id, agent_name, mode="run"):
    """Return the prompt text for an agent.

    If `prompt_or_repo_id` contains any whitespace it is treated as the prompt
    itself and returned unchanged; otherwise it is treated as a dataset repo id
    and the template file for `mode` is fetched from it. The obfuscated
    original declared all three parameters as `A_` (a SyntaxError); the names
    are restored from the references in the body.

    Args:
        prompt_or_repo_id: literal prompt, repo id, or None (use the default repo).
        agent_name: reported in the download user-agent.
        mode: which template to fetch, "run" or "chat".
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="""dataset""", user_agent={"""agent""": agent_name})
    with open(prompt_file, """r""", encoding="""utf-8""") as f:
        return f.read()
| 529
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The obfuscated original bound both values to `__lowerCAmelCase`, silently
# discarding the logger; restore the conventional distinct names.
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_(_A):
    """Configuration for the TrOCR decoder.

    The obfuscated original assigned three class attributes to one name and
    declared all twenty constructor parameters as `UpperCamelCase__` (a
    SyntaxError); parameter names are restored from the attribute assignments
    in the body and the upstream TrOCRConfig, keeping the original defaults.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store decoder hyper-parameters and forward the special-token ids to the base config."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 529
| 1
|
'''simple docstring'''
def bfs(graph, source, t, parent):
    """Breadth-first search over the residual graph.

    Fills `parent` with the BFS tree and returns True iff `t` is reachable
    from `source` through edges with remaining capacity. Restored from the
    obfuscated original, whose four parameters all shared one name (a
    SyntaxError) and which was defined under a name different from its call
    site in ford_fulkerson.
    """
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # follow only unvisited neighbors with residual capacity
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from `source` to `sink` (Edmonds–Karp variant).

    `graph` is a capacity/residual adjacency matrix and is modified in place.
    Restored from the obfuscated original (duplicate parameter names, and the
    bottom-of-file call site references `ford_fulkerson`).
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # find the bottleneck capacity along the augmenting path
        path_flow = float('Inf')
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities (forward minus, backward plus)
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Demo: max flow of the classic 6-node CLRS network. The obfuscated original
# bound the matrix and the endpoints to duplicate throwaway names while the
# call below reads `graph`, `source` and `sink`.
graph = [
    [0, 1_6, 1_3, 0, 0, 0],
    [0, 0, 1_0, 1_2, 0, 0],
    [0, 4, 0, 0, 1_4, 0],
    [0, 0, 9, 0, 0, 2_0],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 266
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
    """Store the tiny-model hyper-parameters used by the Nystromformer tests.

    The obfuscated original declared every parameter as `snake_case` (a
    SyntaxError); names are restored from the attribute assignments below,
    with the original default values kept in order.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , snake_case : Optional[int] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : str , snake_case : Union[str, Any] , snake_case : Tuple ) -> Dict:
__UpperCAmelCase : Any = NystromformerModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Dict , snake_case : int , snake_case : Optional[int] , snake_case : int , snake_case : int , snake_case : List[str] , snake_case : Any , snake_case : Tuple ) -> List[Any]:
__UpperCAmelCase : List[Any] = NystromformerForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = NystromformerForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Dict , snake_case : Any , snake_case : int , snake_case : Dict , snake_case : Any , snake_case : Optional[int] , snake_case : Tuple , snake_case : Any ) -> List[str]:
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : str = NystromformerForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : int , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : List[str] ) -> Optional[Any]:
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : int = NystromformerForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : int , snake_case : Tuple , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] ) -> Any:
__UpperCAmelCase : List[str] = self.num_choices
__UpperCAmelCase : List[str] = NystromformerForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : int = config_and_inputs
__UpperCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NystromformerModelTest(_a, unittest.TestCase):
    """Shape/consistency tests for every Nyströmformer head.

    NOTE(review): the degraded source listed the mixin base ``_a`` twice,
    which raises ``TypeError: duplicate base class`` at class creation; it is
    listed once here. The class attributes below all shared one degraded name
    (so only the last survived); they are restored to the names the common
    tester mixin conventionally reads — confirm against the mixin.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        # Tester builds tiny configs/inputs; ConfigTester exercises config round-trips.
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # The degraded source bound the loop value to a throwaway local, so
            # every iteration re-tested the same config; apply it to the config.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the uw-madison/nystromformer-512 checkpoint.

    NOTE(review): the degraded source gave both methods the same name (the
    second shadowed the first) and no ``test_`` prefix, so unittest never
    collected them; both are restored here.
    """

    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        # Position 2 is the [MASK] token of the prompt above.
        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 266
| 1
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch one Open Library record (as a dict) for an olid like ``isbn/0140328726``.

    Raises:
        ValueError: if, after stripping whitespace/slashes, the olid is not of
        the two-part ``<type>/<id>`` shape.

    The degraded source named the parameter ``A_`` while the body read the
    undefined names ``olid``/``new_olid``; the function name is restored from
    its call sites (``summarize_book`` and the ``__main__`` loop).
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    # Network call — returns the parsed JSON body.
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given raw Open Library book data, return a human-readable summary dict.

    Selects the keys in ``desired_keys``, resolves each author olid to a name
    (one network request per author via ``get_openlibrary_data``), flattens the
    first sentence, and joins any list values into comma-separated strings.
    Names restored from the RHS identifiers preserved in the degraded source.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry carries a "key" olid; fetch its record for the name.
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    # Interactive CLI: run the doctests, then repeatedly prompt for an ISBN
    # and print its Open Library summary until the user quits.
    import doctest

    doctest.testmod()
    while True:
        # NOTE(review): the input is bound to `__A` but the checks below read
        # `isbn` — presumably this line should bind `isbn`; confirm upstream.
        __A = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # Open Library ISBNs are 10 or 13 digits.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
            continue
        print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
        try:
            # NOTE(review): bound to `__A` but printed as `book_summary` below —
            # presumably this should bind `book_summary`; confirm upstream.
            __A = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
            print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 68
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
# Map from pretrained checkpoint name to its hosted config.json URL.
# NOTE(review): this rebinds the same `UpperCamelCase__` global as the logger
# above (clobbering it) — upstream these were two distinct module globals.
UpperCamelCase__: str = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE( A__ ):
    """Configuration for LXMERT-style models.

    Holds the hyper-parameters of the language (``l_layers``), cross-modality
    (``x_layers``) and vision (``r_layers``) encoders plus the pre-training
    task switches; everything else is delegated to the base config class
    (``A__`` — presumably a PretrainedConfig alias, confirm).

    Fixes over the degraded source: the ``__init__`` signature repeated one
    parameter name (a SyntaxError) — names are restored by matching the
    defaults to the attributes assigned in the body — and every assignment
    bound a throwaway local instead of ``self``, so the config stored nothing.
    """

    # NOTE(review): both class attributes shared one degraded name; these are
    # the names a PretrainedConfig subclass conventionally defines — confirm.
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-encoder depth map consumed in place of a single layer count.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 127
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the standard "confusing pictures" demo image as an RGB PIL image.

    Name restored from the call site in the conversion routine; the degraded
    body read an undefined parameter name for the URL and the ``stream`` flag
    (``stream=True`` is the conventional value for reading ``.raw`` — confirm).
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Return (old, new) pairs mapping LAVIS InstructBLIP weight names to HF names.

    ``config`` only needs ``config.vision_config.num_hidden_layers``; one group
    of per-layer entries is emitted for each vision-encoder layer. Name and the
    ``rename_keys``/``config`` locals restored from the degraded source's RHS.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The degraded signature repeated one parameter name (a SyntaxError) and the
    body discarded the popped value; both are restored here.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the original per-layer q/v biases into a single qkv bias, in place.

    The original checkpoint stores only q and v biases (k has no bias); the HF
    layer expects one concatenated qkv bias with zeros in the k slot. The
    degraded signature repeated one parameter name and never wrote the fused
    bias back; both are fixed here.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: [q | zeros(k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    """Build the InstructBlipConfig (and target image size) for ``model_name``.

    Returns:
        (config, image_size): image size is 364 for COCO-finetuned checkpoints,
        224 otherwise. Name and locals restored from the call site / RHS names
        preserved in the degraded source.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original LAVIS InstructBLIP checkpoint into the HF format.

    Loads the LAVIS model, renames/fuses its weights into an
    ``InstructBlipForConditionalGeneration``, verifies pixel values, logits and
    a generation against the original, then optionally saves and/or pushes.

    NOTE(review): the degraded source collapsed every local to one name and
    repeated the parameter names (a SyntaxError); this reconstruction restores
    them from the RHS attribute uses. The exact pairing of ``cuda:1``/``cuda:2``
    to the LAVIS vs HF model is an assumption — confirm before multi-GPU use.
    """
    # Q-Former uses a BERT tokenizer with one extra "[DEC]" decoder token.
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    # CLI entry point. The degraded source bound the parser, the choices list
    # and the parsed args all to one name while reading them by their real
    # names below; the three distinct locals are restored.
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 539
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=16 ,UpperCAmelCase_=[32, 64, 128] ,UpperCAmelCase_=[1, 2, 1] ,UpperCAmelCase_=[2, 2, 4] ,UpperCAmelCase_=2 ,UpperCAmelCase_=2.0 ,UpperCAmelCase_=True ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=10 ,UpperCAmelCase_=8 ,UpperCAmelCase_=["stage1", "stage2"] ,UpperCAmelCase_=[1, 2] ,) -> Dict:
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def _a ( self ) -> int:
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Any:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> str:
lowercase__ = FocalNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Dict:
lowercase__ = FocalNetBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase__ = None
lowercase__ = FocalNetBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Dict:
lowercase__ = FocalNetForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = FocalNetForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _a(self, config, pixel_values, labels) -> Optional[Any]:
    """Run FocalNetForImageClassification and check the logits shape, both for
    the configured channel count and for 1-channel (greyscale) input.
    """
    # fix: the three parameters all shared one name (a SyntaxError as
    # written); renamed to the values unpacked from prepare_config_and_inputs.
    # fix: the label count must be applied to the config, not bound to a
    # throwaway local, or the head is built with the default num_labels.
    config.num_labels = self.type_sequence_label_size
    model = FocalNetForImageClassification(config)
    # NOTE(review): assumes torch_device is imported at module level — confirm.
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    # test greyscale images
    # fix: likewise, the channel count belongs on the config.
    config.num_channels = 1
    model = FocalNetForImageClassification(config)
    model.to(torch_device)
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def _a(self) -> List[str]:
    """Return ``(config, inputs_dict)`` in the format the common test mixin
    expects, where ``inputs_dict`` carries only ``pixel_values``.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    # fix: the original unpacked all three values into one name and then
    # referenced undefined `config` / `pixel_values` (NameError at runtime).
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class snake_case (UpperCamelCase , UpperCamelCase , unittest.TestCase ):
    """Common test suite for the FocalNet model family (base model, image
    classification, masked-image modeling and backbone heads).

    NOTE(review): this block has been mechanically renamed — every method is
    named ``_a`` and every class attribute ``lowerCAmelCase__``, so later
    definitions shadow earlier ones and most of these tests cannot run as
    written.  Several statements also reference ``UpperCAmelCase_``, a name
    never defined in their scope; in each case it apparently stands for the
    value just assigned to ``lowercase__`` on a preceding line.  Confirm
    against the upstream source before relying on this suite.
    """

    # All FocalNet model classes exercised by the shared tests (empty tuple
    # when torch is not installed).
    lowerCAmelCase__ :List[str] = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task name -> model class mapping for the pipeline test mixin.
    lowerCAmelCase__ :str = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature switches consumed by the test mixins (all disabled here).
    lowerCAmelCase__ :str = False
    lowerCAmelCase__ :List[Any] = False
    lowerCAmelCase__ :Dict = False
    lowerCAmelCase__ :List[Any] = False
    lowerCAmelCase__ :Union[str, Any] = False

    def _a ( self ) -> Any:
        # setUp: build the shared model tester and a ConfigTester.
        # NOTE(review): ``UpperCAmelCase_`` is undefined here — upstream this
        # is presumably ``config_class=FocalNetConfig`` and
        # ``has_text_modality=False``; confirm.
        lowercase__ = FocalNetModelTester(self )
        lowercase__ = ConfigTester(self ,config_class=UpperCAmelCase_ ,embed_dim=37 ,has_text_modality=UpperCAmelCase_ )

    def _a ( self ) -> Optional[int]:
        # Run the standard configuration checks (JSON round-trips, save/load,
        # num_labels handling, parameterless init, argument init).
        # NOTE(review): the first call is on ``self`` rather than
        # ``self.config_tester`` — confirm a local override exists elsewhere
        # in the file.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _a ( self ) -> List[Any]:
        # Deliberate no-op placeholder.
        return

    def _a ( self ) -> Tuple:
        # Forward-pass shape test for the base model.
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    def _a ( self ) -> Union[str, Any]:
        # Backbone feature-map / channel test.
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCAmelCase_ )

    def _a ( self ) -> str:
        # Masked-image-modeling reconstruction-shape test.
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )

    def _a ( self ) -> Dict:
        # Image-classification logits-shape test.
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )

    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def _a ( self ) -> str:
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def _a ( self ) -> Optional[int]:
        pass

    def _a ( self ) -> int:
        # Every model class except the backbone (last tuple entry) should
        # expose an nn.Module input embedding and either no output embedding
        # or an nn.Linear one.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowercase__ = model_class(UpperCAmelCase_ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) )

    def _a ( self ) -> Union[str, Any]:
        # forward() of every class except the backbone must take
        # ``pixel_values`` as its first argument.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowercase__ = model_class(UpperCAmelCase_ )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )

    def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> List[Any]:
        # Shared helper: run a model with hidden-state output enabled and
        # verify the number of hidden states, the patch-level sequence shape,
        # and that reshaped_hidden_states can be viewed back to that shape.
        # NOTE(review): the four parameters share one name (a SyntaxError as
        # written); upstream they are presumably
        # (config, inputs_dict, model_class, image_size) — confirm.
        lowercase__ = model_class(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        with torch.no_grad():
            lowercase__ = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) )
        lowercase__ = outputs.hidden_states
        lowercase__ = getattr(
            self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
        # FocalNet has a different seq_length
        lowercase__ = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        lowercase__ = outputs.reshaped_hidden_states
        self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = reshaped_hidden_states[0].shape
        lowercase__ = (
            reshaped_hidden_states[0].view(UpperCAmelCase_ ,UpperCAmelCase_ ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def _a ( self ) -> str:
        # Hidden-state checks: once via the forward kwarg, once via the
        # config flag (the kwarg entry is deleted from inputs_dict between
        # the two runs).
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            lowercase__ = True
            self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )

    def _a ( self ) -> int:
        # Same hidden-state checks with an image size that is not a multiple
        # of the patch size, so the model must pad up to
        # (padded_height, padded_width).
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = 3
        lowercase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowercase__ = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            lowercase__ = True
            self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,(padded_height, padded_width) )

    @slow
    def _a ( self ) -> Dict:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = FocalNetModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )

    def _a ( self ) -> List[Any]:
        # With a zero-initialized config, the mean of every non-embedding
        # trainable parameter must be exactly 0.0 or 1.0 (rounded at 1e-9).
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = _config_zero_init(UpperCAmelCase_ )
        for model_class in self.all_model_classes:
            lowercase__ = model_class(config=UpperCAmelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class snake_case (unittest.TestCase ):
    """Slow integration test: run the pretrained ``microsoft/focalnet-tiny``
    classifier on a sample COCO image and check the logits."""

    @cached_property
    def default_image_processor(self):
        """Image processor for focalnet-tiny, or None when vision extras are
        unavailable.

        fix: both methods of this class were named ``_a``, so this cached
        property was shadowed and ``self.default_image_processor`` below
        raised AttributeError; restored the name the test body references.
        """
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Forward a COCO sample through the pretrained classifier and verify
        the logits shape, the first three logit values, and the argmax class.
        """
        # NOTE(review): assumes torch_device is imported at module level — confirm.
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image, return_tensors="pt" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
        # fix: assertTrue(x, 281) treats 281 as the failure *message* and
        # passes for any truthy x — the intent is an equality check.
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(), 281)
@require_torch
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Tuple = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ :int = FocalNetConfig
lowerCAmelCase__ :List[Any] = False
def _a ( self ) -> Optional[int]:
lowercase__ = FocalNetModelTester(self )
| 539
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.