import sys


def matrix_chain_order(array) -> tuple:
    """Compute the minimal scalar-multiplication cost of a matrix chain."""
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j) -> None:
    """Recursively print the optimal parenthesization."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main() -> None:
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Sizes of the matrices formed from the array above:
    # 30x35 35x15 15x5 5x10 10x20 20x25
    matrix, sol = matrix_chain_order(array)
    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(sol, 1, n - 1)


if __name__ == "__main__":
    main()
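
# A quick sanity check, assuming the definitions above: for the classic CLRS
# chain [30, 35, 15, 5, 10, 20, 25], the DP table reports 15125 scalar
# multiplications, with optimal parenthesization ((A1(A2A3))((A4A5)A6)).
def _check_matrix_chain_order() -> None:
    matrix, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert matrix[1][6] == 15125
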
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer backed by SentencePiece plus a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs as <s> A </s> for one sequence or <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the reduced vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of sub-word tokens back into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{token} \n")

        return out_vocab_file, out_monolingual_vocab_file
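
# A hypothetical usage sketch for the tokenizer above (checkpoint name taken
# from the pretrained map; requires the `sentencepiece` package):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
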
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
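
# `calculate_bleu` is imported from a local `utils` module above; a rough
# stand-in built on sacrebleu (an assumption, not necessarily the exact
# helper) would be:
#
#   from sacrebleu import corpus_bleu
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}
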
from random import shuffle

import tensorflow as tf
from numpy import array


def tf_k_means_cluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    `vectors` should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k; `noofclusters` is the number of clusters.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
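
# The graph above relies on TF1-era APIs (tf.placeholder, tf.Session, tf.sub,
# tf.initialize_all_variables) and will not run on TensorFlow 2. A compact
# NumPy sketch of the same expectation-maximization loop, assuming `vectors`
# is an (n, dim) array:
import numpy as np


def kmeans_numpy(vectors, noofclusters, noofiterations=100):
    rng = np.random.default_rng(0)
    vectors = np.asarray(vectors, dtype=float)
    centroids = vectors[rng.choice(len(vectors), noofclusters, replace=False)]
    for _ in range(noofiterations):
        # Expectation: assign each vector to its nearest centroid.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its cluster.
        for k in range(noofclusters):
            members = vectors[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments
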
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
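
# A minimal sketch of the lazy-import pattern used above (illustrative names,
# not the actual transformers._LazyModule implementation): attribute access
# triggers the real submodule import on first use and caches the result.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value
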
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
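
# A self-contained check of the SwiGLU sizing arithmetic implemented by
# compute_intermediate_size above, for the 7B hidden size (4096):
#   int(8 * 4096 / 3) = 10922, rounded up to a multiple of 256 -> 11008,
# which matches the "7B" entry of INTERMEDIATE_SIZE_MAP.
assert compute_intermediate_size(4096) == 11008
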
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = GPTaTokenizer
UpperCAmelCase_ = GPTaTokenizerFast
UpperCAmelCase_ = True
UpperCAmelCase_ = {"add_prefix_space": True}
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer"
return input_text, output_text
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : Tuple = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
# Simple input
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : int = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$"
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this."
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please."
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
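
# How the toy BPE fixture in setUp() tokenizes "lower newer", assuming
# add_prefix_space=True as in the tests above: "\u0120" is the byte-level
# marker for a leading space, so the merges "\u0120 l" -> "\u0120l o" ->
# "\u0120lo w" build "\u0120low", "er" comes from the learned "e r" merge,
# and the remaining characters of "newer" fall back to single symbols:
#   ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
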
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ to swap in the schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
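
# A hypothetical usage sketch, assuming the template above is registered for
# the "text-classification" task in `datasets`:
#
#   from datasets import ClassLabel, Features, Value
#   features = Features({"review": Value("string"),
#                        "rating": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="review", label_column="rating")
#   template = template.align_with_features(features)
#   template.column_mapping  # {"review": "text", "rating": "labels"}
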
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of distinct prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of a list are equal (vacuously true if empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of the n consecutive integers (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
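
# Known checkpoints for the search above: the first pair of consecutive
# integers with two distinct prime factors each is (14, 15), and the first
# triple with three each starts at 644.
assert run(2) == [14, 15]
assert run(3) == [644, 645, 646]
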
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
UpperCAmelCase__ : List[Any] = b * b - 4 * a * c
UpperCAmelCase__ : Any = (-b + sqrt(__UpperCamelCase )) / (2 * a)
UpperCAmelCase__ : Optional[Any] = (-b - sqrt(__UpperCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
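
# A quick worked check, assuming quadratic_roots above: 5x^2 + 6x + 1
# factors as (5x + 1)(x + 1), so the roots are -0.2 and -1.0.
assert quadratic_roots(5, 6, 1) == (-0.2, -1.0)
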
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
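
# A stand-alone sketch of the same pipeline outside a test harness (model
# name taken from the slow test above; weights are downloaded on first use):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
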
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=2 , _lowerCAmelCase=2_4 , _lowerCAmelCase=1_6 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1_0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=None , _lowerCAmelCase=2 , _lowerCAmelCase=2 , ):
_lowercase : List[Any] = parent
_lowercase : Dict = batch_size
_lowercase : int = patch_size
_lowercase : int = max_length
_lowercase : Optional[Any] = num_mel_bins
_lowercase : Any = is_training
_lowercase : int = use_labels
_lowercase : Dict = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = type_sequence_label_size
_lowercase : List[str] = initializer_range
_lowercase : List[Any] = scope
_lowercase : str = frequency_stride
_lowercase : Optional[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowercase : Optional[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowercase : int = (self.max_length - self.patch_size) // self.time_stride + 1
_lowercase : List[Any] = frequency_out_dimension * time_out_dimension
_lowercase : Tuple = num_patches + 2
def __a ( self ):
_lowercase : List[str] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowercase : Dict = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = self.get_config()
return config, input_values, labels
def __a ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = ASTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self ):
_lowercase : Optional[int] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_values': input_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : str = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __a ( self ):
_lowercase : Optional[Any] = ASTModelTester(self )
_lowercase : List[Any] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def __a ( self ):
pass
def __a ( self ):
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_lowerCAmelCase )
_lowercase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[str] = [*signature.parameters.keys()]
_lowercase : Tuple = ['input_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = ASTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def __a ( self ):
_lowercase : List[Any] = self.default_feature_extractor
_lowercase : List[Any] = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(_lowerCAmelCase )
_lowercase : List[Any] = self.default_feature_extractor
_lowercase , _lowercase : Any = prepare_audio()
_lowercase : int = audio.squeeze().numpy()
_lowercase : List[str] = feature_extractor(_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : List[str] = model(**_lowerCAmelCase )
# verify the logits
_lowercase : Any = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowercase : Dict = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
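# Usage sketch (an illustration, not part of the test suite): end-to-end audio
# classification with the same public MIT AudioSet checkpoint the integration test
# loads. The local path "sample.flac" is a placeholder; AST expects 16 kHz audio.
if __name__ == "__main__":
    import torch
    import torchaudio
    from transformers import ASTFeatureExtractor, ASTForAudioClassification

    checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"
    feature_extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
    model = ASTForAudioClassification.from_pretrained(checkpoint)
    waveform, sampling_rate = torchaudio.load("sample.flac")  # placeholder clip
    inputs = feature_extractor(waveform.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])  # top AudioSet label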
| 66 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase : List[str] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase : Any = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase : str = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by how many characters match the target at the same position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child_list) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Crossover and mutate one parent against random mates, proportionally to its score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append the new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; return (generation, total_population, best_string)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
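# Illustrative helper (a sketch, not in the original module): a smaller run showing
# the basic() API converging quickly on a short target over a tiny gene pool.
def demo_small_target() -> None:
    generation, total_population, best = basic("hello world", list("helo wrd"), debug=False)
    print(
        f"converged to {best!r} after {generation} generations "
        f"({total_population} candidate strings scored)"
    )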
| 663 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
CORRECT_DICT = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
snake_case = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
snake_case = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
snake_case = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
snake_case = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
snake_case = """"""
snake_case = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
snake_case = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
snake_case = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md: str, expected_dict: dict) -> None:
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md: str, expected_error: str) -> None:
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
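# Usage sketch of the flow these tests exercise (behaviour hedged to what the
# assertions above show): from_string parses a card against the structure spec and
# validate() raises ValueError with the messages in the EXPECTED_ERROR_* constants.
def demo_readme_validation() -> None:
    ReadMe.from_string(README_CORRECT, example_yaml_structure).validate()  # passes silently
    try:
        ReadMe.from_string(README_NO_YAML, example_yaml_structure).validate()
    except ValueError as err:
        print(f"caught expected validation error: {err}")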
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md: str, expected_error: str) -> None:
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md: str) -> None:
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md: str, expected_dict: dict) -> None:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md: str, expected_error: str) -> None:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md: str, expected_error: str) -> None:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md: str) -> None:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True) | 67 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
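# Usage sketch: dy/dx = y with y(0) = 1 has exact solution e**x, so the Euler
# estimate of y(1) should approach e ~ 2.71828 as the step size shrinks.
def demo_explicit_euler() -> None:
    for step_size in (0.1, 0.01, 0.001):
        y = explicit_euler(lambda x, y: y, 1.0, 0.0, step_size, 1.0)
        print(f"step {step_size}: y(1) ~ {y[-1]:.5f} (exact {np.e:.5f})")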
| 663 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class Tree:
    """Wraps a root node and sums every value reachable from it."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
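# Usage sketch: iterating a Tree yields one value, the sum of all nodes, so this
# three-node example prints 6.
def demo_tree_sum() -> None:
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    print(next(iter(Tree(root))))  # 1 + 2 + 3 -> 6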
| 68 |
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
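# Cross-check sketch for small limits: a direct (slow) restatement of the problem
# that solution() must agree with; useful only for tiny limits.
def brute_force_solution(limit: int) -> int:
    def sum_proper_divisors(n: int) -> int:
        return sum(d for d in range(1, n) if n % d == 0)

    abundant = [n for n in range(1, limit + 1) if sum_proper_divisors(n) > n]
    expressible = {a + b for a in abundant for b in abundant if a + b <= limit}
    return sum(n for n in range(1, limit + 1) if n not in expressible)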
| 663 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a_ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(a_ , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(a_ , "num_encoder_blocks" ) )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , a_ : Any , a_ : Tuple=13 , a_ : Optional[Any]=64 , a_ : str=3 , a_ : Any=4 , a_ : List[str]=[2, 2, 2, 2] , a_ : Optional[int]=[8, 4, 2, 1] , a_ : List[str]=[16, 32, 64, 128] , a_ : Union[str, Any]=[1, 4, 8, 16] , a_ : Dict=[1, 2, 4, 8] , a_ : Tuple=True , a_ : Optional[int]=True , a_ : int="gelu" , a_ : Optional[Any]=0.1 , a_ : Optional[int]=0.1 , a_ : int=0.02 , a_ : Optional[int]=3 , a_ : Any=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_encoder_blocks
__snake_case = sr_ratios
__snake_case = depths
__snake_case = hidden_sizes
__snake_case = downsampling_rates
__snake_case = num_attention_heads
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : List[Any] , a_ : int , a_ : List[str] , a_ : Dict ):
"""simple docstring"""
__snake_case = SegformerModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
__snake_case = __snake_case = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def A ( self : int , a_ : List[Any] , a_ : Dict , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = SegformerForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A ( self : Optional[int] , a_ : str , a_ : Dict , a_ : str ):
"""simple docstring"""
__snake_case = 1
__snake_case = SegformerForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
__snake_case = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(a_ )
__snake_case = model(a_ , labels=a_ )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Any ):
"""simple docstring"""
__snake_case = SegformerModelTester(self )
__snake_case = SegformerConfigTester(self , config_class=a_ )
def A ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*a_ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def A ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def A ( self : Optional[Any] ):
"""simple docstring"""
pass
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case = outputs.attentions
__snake_case = sum(self.model_tester.depths )
self.assertEqual(len(a_ ) , a_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
# verify the first attentions (first block, first layer)
__snake_case = (self.model_tester.image_size // 4) ** 2
__snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case = (self.model_tester.image_size // 32) ** 2
__snake_case = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case = len(a_ )
# Check attention is always last and order is fine
__snake_case = True
__snake_case = True
__snake_case = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a_ , a_ ) )
self.assertEqual(out_len + 1 , len(a_ ) )
__snake_case = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
# verify the first attentions (first block, first layer)
__snake_case = (self.model_tester.image_size // 4) ** 2
__snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def A ( self : int ):
"""simple docstring"""
def check_hidden_states_output(a_ : List[Any] , a_ : List[Any] , a_ : Tuple ):
__snake_case = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case = outputs.hidden_states
__snake_case = self.model_tester.num_encoder_blocks
self.assertEqual(len(a_ ) , a_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a_ , a_ , a_ )
def A ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : int ):
"""simple docstring"""
pass
@slow
def A ( self : List[str] ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = SegformerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Dict ):
"""simple docstring"""
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
__snake_case = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" )
__snake_case = encoded_inputs.pixel_values.to(a_ )
with torch.no_grad():
__snake_case = model(a_ )
__snake_case = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a_ )
__snake_case = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )
@slow
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
__snake_case = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" )
__snake_case = encoded_inputs.pixel_values.to(a_ )
with torch.no_grad():
__snake_case = model(a_ )
__snake_case = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a_ )
__snake_case = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a_ , atol=1e-1 ) )
@slow
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
__snake_case = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" )
__snake_case = encoded_inputs.pixel_values.to(a_ )
with torch.no_grad():
__snake_case = model(a_ )
__snake_case = outputs.logits.detach().cpu()
__snake_case = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(500, 300)] )
__snake_case = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , a_ )
__snake_case = image_processor.post_process_semantic_segmentation(outputs=a_ )
__snake_case = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , a_ )
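# Usage sketch distilled from the integration tests above: one-image semantic
# segmentation with the public ADE20k checkpoint, resized back to input resolution.
if __name__ == "__main__":
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    image_processor = SegformerImageProcessor()
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes takes (height, width); PIL's .size is (width, height).
    segmentation = image_processor.post_process_semantic_segmentation(
        outputs=outputs, target_sizes=[image.size[::-1]]
    )[0]
    print(f"predicted {len(segmentation.unique())} distinct ADE20k classes")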
| 69 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
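# A minimal sketch of what _LazyModule does above (simplified: no __dir__, pickling,
# or dependency-error handling): attribute access imports the real submodule the
# first time a name from the import structure is requested.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item: str):
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)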
| 663 | 0 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test using 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Fast pre-check against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Draw random keysize-bit integers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 70 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = BlenderbotSmallConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : Tuple = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCAmelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
UpperCAmelCase_ = "facebook/blenderbot_small-90M"
@cached_property
def A_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 663 | 0 |
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return (numerator, denominator) of the fraction in lowest terms equal to `decimal`."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm leaves the greatest common divisor in `divisor`.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 71 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = (DPMSolverSDEScheduler,)
UpperCAmelCase_ = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def A_ ( self : int ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : int = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Dict = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def A_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 663 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
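A quick illustrative check (added here, not in the original file) of the channel-width arithmetic above: each of the `len(depths) - 1` patch-merging steps doubles `embed_dim`, which is why `hidden_size` ends up at 96 * 2**3 = 768 with the defaults.

embed_dim, depths = 96, [2, 2, 6, 2]
stage_widths = [embed_dim * 2**i for i in range(len(depths))]  # channel width per stage
assert stage_widths == [96, 192, 384, 768]
assert int(embed_dim * 2 ** (len(depths) - 1)) == stage_widths[-1]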
| 72 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single Donut-style processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = self.image_processor
SCREAMING_SNAKE_CASE__ : Any = False
def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = args[0]
SCREAMING_SNAKE_CASE__ : str = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def token2json(self, tokens, is_inner_value=False, added_vocab=None):
    """Convert a generated token sequence into an ordered JSON-like dict."""
    if added_vocab is None:
        added_vocab = self.tokenizer.get_added_vocab()

    output = {}

    while tokens:
        start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
        if start_token is None:
            break
        key = start_token.group(1)
        end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
        start_token = start_token.group()
        if end_token is None:
            tokens = tokens.replace(start_token, "")
        else:
            end_token = end_token.group()
            start_token_escaped = re.escape(start_token)
            end_token_escaped = re.escape(end_token)
            content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
            if content is not None:
                content = content.group(1).strip()
                if r"<s_" in content and r"</s_" in content:  # non-leaf node
                    value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                    if value:
                        if len(value) == 1:
                            value = value[0]
                        output[key] = value
                else:  # leaf nodes
                    output[key] = []
                    for leaf in content.split(r"<sep/>"):
                        leaf = leaf.strip()
                        if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                            leaf = leaf[1:-2]  # for categorical special tokens
                        output[key].append(leaf)
                    if len(output[key]) == 1:
                        output[key] = output[key][0]

            tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
            if tokens[:6] == r"<sep/>":  # non-leaf nodes
                return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

    if len(output):
        return [output] if is_inner_value else output
    else:
        return [] if is_inner_value else {"text_sequence": tokens}
@property
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
return self.image_processor_class
@property
def A_ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
return self.image_processor
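A hedged usage sketch (an addition for illustration; the tag names below are invented, and bypassing `__init__` is done only to exercise `token2json` without loading real models or tokenizers):

demo = object.__new__(DonutProcessor)  # skip __init__: token2json itself needs no image processor
sequence = "<s_menu><s_nm>Latte</s_nm><s_price>4.00</s_price></s_menu>"
parsed = demo.token2json(sequence, added_vocab=[])  # passing added_vocab=[] avoids needing a tokenizer
assert parsed == {"menu": {"nm": "Latte", "price": "4.00"}}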
| 663 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001,
        downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
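The "character config" block above is what makes CANINE tokenizer-free. A minimal sketch of the hash-bucket embedding idea, assuming a simple multiplicative hash family (the exact hash and prime multipliers used by the model may differ):

# Hypothetical illustration (not CANINE's exact code): each Unicode codepoint
# is hashed num_hash_functions times into num_hash_buckets buckets, and the
# resulting bucket embeddings are combined — so raw characters need no fixed vocab.
_PRIMES = [31, 43, 59, 61, 73, 97, 103, 113]  # illustrative prime multipliers

def bucket_ids(codepoint, num_hash_functions=8, num_hash_buckets=16384):
    return [((codepoint + 1) * p) % num_hash_buckets for p in _PRIMES[:num_hash_functions]]

assert len(bucket_ids(ord("é"))) == 8  # one bucket id per hash function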
| 73 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
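For illustration (an addition, not part of the original file), the core of the lazy-import pattern can be sketched independently of transformers: attribute access, not the import itself, is what triggers loading the submodule.

import importlib
import types

class LazyModule(types.ModuleType):
    """Minimal stand-in for transformers' _LazyModule (illustrative only)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)  # the submodule is imported only on first access
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")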
| 663 | 0 |
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 74 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : List[str] = num_heads
SCREAMING_SNAKE_CASE__ : str = window_size
SCREAMING_SNAKE_CASE__ : Any = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = patch_norm
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride
SCREAMING_SNAKE_CASE__ : List[Any] = out_features
SCREAMING_SNAKE_CASE__ : Dict = out_indices
def A_ ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"]
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def A_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerSwinModelTester(self )
SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
    t[t != t] = 0  # NaN is the only value not equal to itself, so this zeroes NaNs in place
    return t
def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class lowerCamelCase (unittest.TestCase , __lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCAmelCase_ = MaskFormerSwinConfig
def A_ ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self )
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertEqual(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 663 | 0 |
"""Generate all permutations of a list with the iterative form of Heap's algorithm."""


def heaps(arr: list) -> list:
    """Return all permutations of `arr` as a list of tuples (mutates `arr` in place)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # c encodes the loop state of the recursive formulation of Heap's algorithm
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
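A quick sanity check (an illustrative addition, not part of the original script, assuming `heaps` from above is importable): Heap's algorithm should yield exactly n! permutations, and exactly the same set that itertools produces.

import math
from itertools import permutations

sample = [1, 2, 3, 4]
out = heaps(sample[:])  # pass a copy, since heaps() mutates its argument
assert len(out) == math.factorial(len(sample))
assert sorted(out) == sorted(permutations(sample))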
| 75 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128,
                 pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 663 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = [[float('''inf''' ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
__lowercase : Tuple = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__UpperCamelCase ):
# looping through rows of graph array
for i in range(__UpperCamelCase ):
# looping through columns of graph array
for j in range(__UpperCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__lowercase : List[Any] = dist[i][k] + dist[k][j]
_print_dist(__UpperCamelCase , __UpperCamelCase )
return dist, v
if __name__ == "__main__":
a_ = int(input('Enter number of vertices: '))
a_ = int(input('Enter number of edges: '))
a_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
a_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
a_ = int(input('Enter source:'))
a_ = int(input('Enter destination:'))
a_ = float(input('Enter weight:'))
a_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
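A non-interactive usage sketch (an illustrative addition, not in the original script, assuming `floyd_warshall` from above is importable) that reproduces the documented example without stdin:

INF = float("inf")
graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],  # edge 1 -> 2, weight 2
    [INF, 1.0, 0.0],  # edge 2 -> 1, weight 1
]
dist, _ = floyd_warshall(graph, 3)
assert dist[1][2] == 2.0 and dist[2][1] == 1.0  # matches the expected output above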
| 76 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCamelCase : int = False
@skip_mps
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase_ = False
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A_ ( cls : str ) -> Union[str, Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : Tuple ) -> str:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = "cpu"
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 6_4, 6_4, 3) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase, 1E-3 )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A_ ( self : Any ) -> str:
"""simple docstring"""
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def A_ ( cls : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 )
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses"
SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7]
SCREAMING_SNAKE_CASE__ : str = pipe(
prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 663 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
while a != 0:
__UpperCAmelCase , __UpperCAmelCase : str = b % a, a
return b
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
if gcd(UpperCamelCase , UpperCamelCase ) != 1:
__UpperCAmelCase : Optional[Any] = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(UpperCamelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = 1, 0, a
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = 0, 1, m
while va != 0:
__UpperCAmelCase : List[str] = ua // va
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
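A worked example (an illustrative addition, not part of the original file): 7 has inverse 15 modulo 26, since 7 * 15 = 105 = 4 * 26 + 1.

inv = find_mod_inverse(7, 26)
assert inv == 15
assert (7 * inv) % 26 == 1

# gcd(a, m) must be 1 for the inverse to exist; 4 and 26 share a factor of 2
try:
    find_mod_inverse(4, 26)
except ValueError as err:
    print(err)  # mod inverse of 4 and 26 does not exist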
| 77 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = PegasusConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : Dict = pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : int = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ = "google/pegasus-xsum"
@cached_property
def A_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase )
assert self.expected_text == generated_words
def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def A_ ( self : List[Any] ) -> Any:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 663 | 0 |
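The integration test above exercises the full TF Pegasus flow: tokenize with padding, beam-search generate, then batch-decode. Below is a minimal standalone sketch of that flow; it assumes TensorFlow, the `transformers` package, and network access to the `google/pegasus-xsum` checkpoint named in the test.

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

model_name = "google/pegasus-xsum"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

src_text = ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."]

# Padding turns the ragged batch into one rectangular tensor.
model_inputs = tokenizer(src_text, padding=True, return_tensors="tf")
# Beam search with 2 beams, mirroring the generate() call in the test.
generated_ids = model.generate(
    model_inputs.input_ids,
    attention_mask=model_inputs.attention_mask,
    num_beams=2,
)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True))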
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Union[str, Any] = ["""pixel_values"""]
def __init__(self : List[str] , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[str] , ):
super().__init__(**__a )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 256}
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(__a )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase (self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ):
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def _lowercase (self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ):
UpperCAmelCase_ = get_size_dict(__a )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def _lowercase (self : int , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : str , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def _lowercase (self : str , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(__a )
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 78 |
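The image processor above is a fixed chain: resize the shortest edge to 256, center-crop to 224x224, rescale by 1/255, then normalize with the ImageNet mean and std. A rough numpy re-implementation of that chain follows, using PIL only for the resize step; the sizes and constants are the defaults from the class above, and the function name is ours.

import numpy as np
from PIL import Image

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

def preprocess(image: Image.Image) -> np.ndarray:
    # Resize so the shortest edge is 256, keeping the aspect ratio.
    w, h = image.size
    scale = 256 / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    arr = np.asarray(image)  # (H, W, C), uint8
    # Center crop to 224x224.
    h, w = arr.shape[:2]
    top, left = (h - 224) // 2, (w - 224) // 2
    arr = arr[top : top + 224, left : left + 224]
    # Rescale to [0, 1], then normalize per channel.
    arr = arr.astype(np.float32) / 255.0
    arr = (arr - IMAGENET_MEAN) / IMAGENET_STD
    # Channels-first, matching ChannelDimension.FIRST above.
    return arr.transpose(2, 0, 1)

pixel_values = preprocess(Image.new("RGB", (640, 480)))
print(pixel_values.shape)  # (3, 224, 224)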
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663 | 0 |
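The TAPAS `__init__` above hands `_import_structure` to `_LazyModule` so that heavy submodules are loaded only on first attribute access. A rough standalone sketch of the same idea, using the PEP 562 module-level `__getattr__` hook; the package name and submodule names here are placeholders, not the real transformers layout.

# lazy_pkg/__init__.py -- hypothetical package demonstrating the pattern
import importlib
from typing import Any

# Maps public names to the submodule that defines them.
_import_structure = {
    "TapasConfig": "configuration_tapas",
    "TapasModel": "modeling_tapas",
}

def __getattr__(name: str) -> Any:
    # Called only when `name` is not found normally (PEP 562),
    # so the submodule is imported on first use, not at package import.
    if name in _import_structure:
        module = importlib.import_module(f".{_import_structure[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")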
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
SCREAMING_SNAKE_CASE__ : List[str] = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
SCREAMING_SNAKE_CASE__ : Dict = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__ : str = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__ : List[str] = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__ : Dict = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
SCREAMING_SNAKE_CASE__ : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
SCREAMING_SNAKE_CASE__ : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
SCREAMING_SNAKE_CASE__ : str = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
SCREAMING_SNAKE_CASE__ : Dict = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Tuple = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : Tuple = """"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
SCREAMING_SNAKE_CASE__ : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
'''simple docstring'''
assert ReadMe.from_string(__lowerCamelCase , __lowerCamelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
with pytest.raises(__lowerCamelCase , match=re.escape(expected_error.format(path="""root""" ) ) ):
UpperCAmelCase__ : str = ReadMe.from_string(__lowerCamelCase , __lowerCamelCase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
with pytest.raises(__lowerCamelCase , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase ) -> Any:
'''simple docstring'''
ReadMe.from_string(__lowerCamelCase , __lowerCamelCase , suppress_parsing_errors=__lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[Any] = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : str = ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : str = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : int = expected_error.format(path=__lowerCamelCase )
with pytest.raises(__lowerCamelCase , match=re.escape(__lowerCamelCase ) ):
UpperCAmelCase__ : str = ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : int = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : List[str] = expected_error.format(path=__lowerCamelCase )
with pytest.raises(__lowerCamelCase , match=re.escape(__lowerCamelCase ) ):
ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[str] = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase , suppress_parsing_errors=__lowerCamelCase )
| 79 |
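Every fixture above feeds the same parsing step: the README is split into a tree of sections keyed by markdown heading level, and `ReadMe` validates that tree against the YAML structure. A small sketch of the heading-tree construction follows; it is independent of the `datasets` implementation and ignores the YAML front-matter handling.

import re

def heading_tree(readme: str) -> dict:
    """Builds {"name", "text", "subsections"} nodes from markdown headings."""
    root = {"name": "root", "text": "", "subsections": []}
    stack = [(0, root)]  # (heading level, node)
    for line in readme.splitlines():
        match = re.match(r"^(#{1,6})\s+(.*)", line)
        if match:
            level = len(match.group(1))
            node = {"name": match.group(2).strip(), "text": "", "subsections": []}
            # Pop back to the nearest shallower heading, then attach.
            while stack[-1][0] >= level:
                stack.pop()
            stack[-1][1]["subsections"].append(node)
            stack.append((level, node))
        else:
            stack[-1][1]["text"] += line + "\n"
    return root

tree = heading_tree("# Dataset Card for X\n## Table of Contents\nSome text here.\n")
print(tree["subsections"][0]["subsections"][0]["name"])  # Table of Contents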
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fc1.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fc1.bias
# output (fairseq names the two FFN projections fc1 and fc2)
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fc2.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fc2.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCamelCase : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 663 | 0 |
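The conversion script closes with the usual equivalence check: run both models on one input, print the maximum absolute difference, and require `torch.allclose` within a tolerance. The same pattern in isolation, with two toy `nn.Linear` layers standing in for the fairseq and Hugging Face models:

import torch
from torch import nn

torch.manual_seed(0)
source = nn.Linear(8, 8)
target = nn.Linear(8, 8)
# Copy weights the way the script does, attribute by attribute.
target.weight.data = source.weight.data.clone()
target.bias.data = source.bias.data.clone()

x = torch.randn(1, 8)
with torch.no_grad():
    their_output = source(x)
    our_output = target(x)

max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # should be ~0 here
if not torch.allclose(our_output, their_output, atol=1e-3):
    raise Exception("Something went wRoNg")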
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__snake_case :ClassVar[Features] = Features({'audio': Audio()} )
__snake_case :ClassVar[Features] = Features({'labels': ClassLabel} )
__snake_case :str = "audio"
__snake_case :str = "labels"
def _a ( self : Any , _lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _lowerCAmelCase ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def _a ( self : Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 80 |
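`align_with_features` above returns a patched copy of the frozen template whose label schema matches the dataset's actual `ClassLabel`, and `column_mapping` tells callers how to rename columns. A stripped-down sketch of that dataclass pattern follows, with a toy `SimpleLabel` standing in for `datasets.ClassLabel`.

import copy
from dataclasses import dataclass, field

@dataclass
class SimpleLabel:  # stand-in for datasets.ClassLabel
    names: list

@dataclass(frozen=True)
class AudioClassificationTemplate:
    task: str = "audio-classification"
    audio_column: str = "audio"
    label_column: str = "labels"
    label_schema: dict = field(default_factory=dict)

    def align_with_features(self, features: dict) -> "AudioClassificationTemplate":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], SimpleLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Frozen dataclasses cannot be mutated, so copy and patch the copy.
        task_template = copy.deepcopy(self)
        object.__setattr__(task_template, "label_schema", {"labels": features[self.label_column]})
        return task_template

    @property
    def column_mapping(self) -> dict:
        return {self.audio_column: "audio", self.label_column: "labels"}

template = AudioClassificationTemplate()
aligned = template.align_with_features({"labels": SimpleLabel(names=["cat", "dog"])})
print(aligned.column_mapping)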
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "Wav2Vec2FeatureExtractor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
@classmethod
def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
try:
return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: ", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase )
def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" )
else:
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"]
return inputs
def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Dict = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if labels is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"]
return input_features
def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : int = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
| 663 | 0 |
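The processor above is essentially a router: `audio` goes to the feature extractor, `text` to the tokenizer, and when both are present the tokenizer's `input_ids` are attached to the audio features as `labels`. A toy sketch of that dispatch, with plain callables standing in for the real feature extractor and tokenizer:

class ToyProcessor:
    """Routes audio to a feature extractor and text to a tokenizer."""

    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer

    def __call__(self, audio=None, text=None, **kwargs):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio, **kwargs) if audio is not None else None
        encodings = self.tokenizer(text, **kwargs) if text is not None else None
        if text is None:
            return inputs
        if audio is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]  # paired call: text becomes labels
        return inputs

# Stand-ins: any callables returning dicts work here.
processor = ToyProcessor(
    feature_extractor=lambda a, **kw: {"input_values": a},
    tokenizer=lambda t, **kw: {"input_ids": [ord(c) for c in t]},
)
print(processor(audio=[0.1, 0.2], text="hi"))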
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple=2 , lowerCamelCase : Any=True , lowerCamelCase : Tuple=False , lowerCamelCase : str=10 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Tuple=32 * 4 , lowerCamelCase : List[Any]=32 * 6 , lowerCamelCase : Tuple=4 , lowerCamelCase : Tuple=32 , ) -> int:
__snake_case : List[str] = parent
__snake_case : int = batch_size
__snake_case : List[str] = is_training
__snake_case : Dict = use_auxiliary_loss
__snake_case : Dict = num_queries
__snake_case : List[str] = num_channels
__snake_case : Tuple = min_size
__snake_case : Optional[int] = max_size
__snake_case : int = num_labels
__snake_case : int = mask_feature_size
def __snake_case ( self : List[Any] ) -> int:
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase )
__snake_case : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase )
__snake_case : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase ) > 0.5
).float()
__snake_case : Dict = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase ) > 0.5).long()
__snake_case : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case ( self : Any ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __snake_case ( self : int ) -> Dict:
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
__snake_case : Tuple = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __snake_case ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[str] ) -> List[str]:
__snake_case : Any = output.encoder_hidden_states
__snake_case : List[str] = output.pixel_decoder_hidden_states
__snake_case : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase ) , config.decoder_config.decoder_layers )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=False ) -> Dict:
with torch.no_grad():
__snake_case : int = MaskFormerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
__snake_case : Union[str, Any] = model(lowerCamelCase , output_hidden_states=lowerCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
__snake_case : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
def comm_check_on_output(lowerCamelCase : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__snake_case : Dict = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
__snake_case : Optional[Any] = model(lowerCamelCase )
comm_check_on_output(lowerCamelCase )
__snake_case : str = model(
pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
comm_check_on_output(lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : int = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : str = False
def __snake_case ( self : Optional[int] ) -> List[str]:
__snake_case : Union[str, Any] = MaskFormerModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __snake_case ( self : int ) -> Dict:
self.config_tester.run_common_tests()
def __snake_case ( self : Any ) -> Union[str, Any]:
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )
def __snake_case ( self : Any ) -> Optional[Any]:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def __snake_case ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def __snake_case ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def __snake_case ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __snake_case ( self : str ) -> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : int ) -> Dict:
pass
def __snake_case ( self : int ) -> List[Any]:
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(lowerCamelCase )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@slow
def __snake_case ( self : List[Any] ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
__snake_case : int = MaskFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __snake_case ( self : Union[str, Any] ) -> str:
__snake_case : Union[str, Any] = (self.model_tester.min_size,) * 2
__snake_case : List[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase ).long(),
}
__snake_case : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase )
__snake_case : Dict = model(**lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def __snake_case ( self : Any ) -> Optional[Any]:
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )
def __snake_case ( self : Any ) -> List[str]:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(lowerCamelCase ).to(lowerCamelCase )
__snake_case : Dict = model(**lowerCamelCase , output_attentions=lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def __snake_case ( self : Optional[int] ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__snake_case : int = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
__snake_case : Optional[Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__snake_case : str = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ).loss
loss.backward()
def __snake_case ( self : Any ) -> str:
# only MaskFormerForInstanceSegmentation has the loss
__snake_case : List[Any] = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
__snake_case : Union[str, Any] = True
__snake_case : Optional[int] = True
__snake_case : Any = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__snake_case : str = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
__snake_case : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__snake_case : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__snake_case : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__snake_case : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_snake_case : float = 1E-4
def lowerCAmelCase_ ( ):
__snake_case : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : Optional[Any] ) -> List[str]:
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def __snake_case ( self : Dict ) -> Optional[int]:
__snake_case : Tuple = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(lowerCamelCase )
__snake_case : Dict = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Optional[int] = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
__snake_case : Optional[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case : Optional[int] = model(**lowerCamelCase )
__snake_case : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
__snake_case : Optional[int] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
__snake_case : Tuple = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def __snake_case ( self : Dict ) -> List[Any]:
__snake_case : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCamelCase )
.eval()
)
__snake_case : Any = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
__snake_case : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case : str = model(**lowerCamelCase )
# masks_queries_logits
__snake_case : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : Union[str, Any] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
__snake_case : Optional[Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
# class_queries_logits
__snake_case : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Optional[int] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def __snake_case ( self : Any ) -> int:
__snake_case : int = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(lowerCamelCase )
.eval()
)
__snake_case : Tuple = self.default_image_processor
__snake_case : List[str] = prepare_img()
__snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
__snake_case : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case : Optional[Any] = model(**lowerCamelCase )
# masks_queries_logits
__snake_case : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : Optional[int] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
__snake_case : List[Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
# class_queries_logits
__snake_case : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def __snake_case ( self : Optional[Any] ) -> List[str]:
__snake_case : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCamelCase )
.eval()
)
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[str] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
__snake_case : str = inputs["pixel_values"].to(lowerCamelCase )
__snake_case : Tuple = [el.to(lowerCamelCase ) for el in inputs["mask_labels"]]
__snake_case : Tuple = [el.to(lowerCamelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
__snake_case : Dict = model(**lowerCamelCase )
self.assertTrue(outputs.loss is not None )
| 81 |
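The training tests above depend on `retain_grad()`: non-leaf tensors discard their `.grad` after backward unless asked to keep it, so the test retains it on each intermediate activation before calling `loss.backward()`. The mechanism in isolation, on a toy two-layer model:

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 1))
x = torch.randn(2, 4)

hidden = model[0](x)
hidden.retain_grad()  # non-leaf tensors only keep .grad when asked to
loss = model[2](model[1](hidden)).sum()
loss.backward(retain_graph=True)

assert hidden.grad is not None  # mirrors self.assertIsNotNone(...grad) above
print(hidden.grad.shape)  # torch.Size([2, 4])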
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 663 | 0 |
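Like the TAPAS `__init__` earlier, this module gates its torch-only names behind a raised `OptionalDependencyNotAvailable`. A minimal sketch of that gating pattern follows; the exception class is redefined locally rather than imported from `transformers.utils`, and the module names are placeholders.

import importlib.util

class OptionalDependencyNotAvailable(BaseException):
    """Raised internally to signal a missing optional backend."""

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None

_import_structure = {"configuration": ["MyConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-only names simply stay out of the public surface
else:
    _import_structure["modeling"] = ["MyModel"]

print(_import_structure)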
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Classifies an English text among a provided list of labels (zero-shot, via MNLI entailment)."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
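# A hedged usage sketch: PipelineTool subclasses are callable, and a call runs
# encode -> forward -> decode. The first call lazily loads the MNLI checkpoint,
# so executing this needs network access (and a working torch install).
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This new API is really intuitive!", labels=["positive", "negative"]))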
| 82 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer, based on SentencePiece, with a reduced "monolingual" vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
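# A brief usage sketch (requires the `sentencepiece` package and network access
# to fetch the checkpoint the maps above point to):
if __name__ == "__main__":
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))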
| 663 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Checks that the version of the Transformers library installed matches `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
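# A small illustration of why `packaging.version` is used here instead of a
# plain string comparison:
if __name__ == "__main__":
    assert version.parse("4.21.0.dev0") < version.parse("4.21.0")  # dev releases sort first
    assert version.parse("4.9.2") < version.parse("4.10.0")  # as strings, "4.9.2" > "4.10.0"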
| 83 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = int(SCREAMING_SNAKE_CASE__ )
assert noofclusters < len(SCREAMING_SNAKE_CASE__ )
# Find out the dimensionality
SCREAMING_SNAKE_CASE__ : List[Any] = len(vectors[0] )
# Will help select random centroids from among the available vectors
SCREAMING_SNAKE_CASE__ : List[Any] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) )
shuffle(SCREAMING_SNAKE_CASE__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
SCREAMING_SNAKE_CASE__ : Tuple = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
SCREAMING_SNAKE_CASE__ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
SCREAMING_SNAKE_CASE__ : Any = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float64" , [dim] )
SCREAMING_SNAKE_CASE__ : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
SCREAMING_SNAKE_CASE__ : Tuple = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("int32" )
SCREAMING_SNAKE_CASE__ : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
SCREAMING_SNAKE_CASE__ : int = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
SCREAMING_SNAKE_CASE__ : str = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.placeholder("float" , [noofclusters] )
SCREAMING_SNAKE_CASE__ : Tuple = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
SCREAMING_SNAKE_CASE__ : Tuple = tf.global_variables_initializer()
# Initialize all variables
sess.run(SCREAMING_SNAKE_CASE__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
SCREAMING_SNAKE_CASE__ : Tuple = 1_00
for _ in range(SCREAMING_SNAKE_CASE__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Any = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
SCREAMING_SNAKE_CASE__ : Tuple = [
sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
SCREAMING_SNAKE_CASE__ : Any = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(SCREAMING_SNAKE_CASE__ ):
# Collect all the vectors assigned to this cluster
SCREAMING_SNAKE_CASE__ : Dict = [
vectors[i]
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
SCREAMING_SNAKE_CASE__ : str = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
SCREAMING_SNAKE_CASE__ : int = sess.run(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = sess.run(SCREAMING_SNAKE_CASE__ )
return centroids, assignments
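# The graph code above targets the TF1 API (tf.placeholder / tf.Session); under
# TF2 it would need the tf.compat.v1 shims. For contrast, a compact NumPy sketch
# of the same Expectation-Maximization loop; `data` is an (n_samples, dim) float array.
import numpy as np

def kmeans_numpy(data, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    centroids = data[rng.choice(len(data), size=k, replace=False)]  # fancy indexing copies
    labels = np.zeros(len(data), dtype=int)
    for _ in range(iterations):
        # Expectation: assign each point to its nearest centroid
        dists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)
        labels = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned points
        for j in range(k):
            if (labels == j).any():
                centroids[j] = data[labels == j].mean(axis=0)
    return centroids, labels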
| 663 | 0 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
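# A slightly more defensive variant (an assumed improvement, not part of the
# original): add a timeout and surface HTTP errors before indexing the payload.
def fetch_bbc_news_safe(api_key: str, timeout: float = 10.0) -> list:
    response = requests.get(_NEWS_API + api_key, timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of a confusing KeyError later
    return response.json().get("articles", [])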
| 84 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
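# Worked example: for the 7B config (dim = 4096 with the defaults above),
# int(8 * 4096 / 3) = 10922, (10922 + 255) // 256 = 43, and 43 * 256 = 11008,
# which matches the "7B" entry in INTERMEDIATE_SIZE_MAP.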
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , "tmp" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = read_json(os.path.join(SCREAMING_SNAKE_CASE__ , "params.json" ) )
SCREAMING_SNAKE_CASE__ : int = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = params["n_layers"]
SCREAMING_SNAKE_CASE__ : List[str] = params["n_heads"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_heads // num_shards
SCREAMING_SNAKE_CASE__ : str = params["dim"]
SCREAMING_SNAKE_CASE__ : List[str] = dim // n_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_0_0_0_0.0
SCREAMING_SNAKE_CASE__ : Tuple = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE__ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE__ : int = params["n_kv_heads"] # for GQA / MQA
SCREAMING_SNAKE_CASE__ : Optional[int] = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE__ : int = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE__ : Dict = n_heads
SCREAMING_SNAKE_CASE__ : str = n_heads_per_shard
SCREAMING_SNAKE_CASE__ : Dict = dim
# permute for sliced rotary
def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
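# Shape intuition for `permute` (a reading of the reshape above, not extra
# logic): the original checkpoint stores rotary query/key rows as interleaved
# pairs per head, and the view -> transpose -> reshape regroups them into the
# half-split layout the Hugging Face rotary implementation expects; values are
# only reordered, never changed.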
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE__ : Dict = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
SCREAMING_SNAKE_CASE__ : List[Any] = [
torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , f'''consolidated.{i:02d}.pth''' ) , map_location="cpu" )
for i in range(SCREAMING_SNAKE_CASE__ )
]
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : List[str] = {"weight_map": {}}
for layer_i in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : List[Any] = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE__ : Any = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE__ : int = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Tuple = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : int = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : str = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : List[str] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[int] = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Write configs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"total_size": param_count * 2}
write_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin.index.json" ) )
SCREAMING_SNAKE_CASE__ : List[str] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
SCREAMING_SNAKE_CASE__ : Dict = params["multiple_of"] if "multiple_of" in params else 2_56
SCREAMING_SNAKE_CASE__ : Dict = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE__ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=SCREAMING_SNAKE_CASE__ , )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(SCREAMING_SNAKE_CASE__ , safe_serialization=SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
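# Example invocation (paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf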
| 663 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
def __init__( self : int , a_ : int , a_ : Union[str, Any]=13 , a_ : List[str]=7 , a_ : int=True , a_ : List[str]=True , a_ : List[Any]=False , a_ : Optional[Any]=True , a_ : Optional[Any]=99 , a_ : List[Any]=64 , a_ : Optional[int]=5 , a_ : str=4 , a_ : List[Any]=64 , a_ : int="gelu" , a_ : List[str]=0.1 , a_ : List[Any]=0.1 , a_ : Optional[Any]=512 , a_ : Union[str, Any]=16 , a_ : int=2 , a_ : Dict=0.02 , a_ : Optional[Any]=3 , a_ : List[Any]=4 , a_ : Optional[int]=None , )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : int = use_input_mask
SCREAMING_SNAKE_CASE__ : str = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE__ : List[Any] = scope
def __lowercase( self : Dict )-> List[str]:
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowercase( self : int , a_ : Any , a_ : Dict , a_ : Dict , a_ : Optional[Any] , a_ : List[Any] , a_ : Optional[int] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = MPNetModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase( self : Tuple , a_ : int , a_ : Union[str, Any] , a_ : List[str] , a_ : Optional[Any] , a_ : Tuple , a_ : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = MPNetForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase( self : str , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Dict , a_ : int , a_ : List[Any] , a_ : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MPNetForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase( self : Optional[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Dict , a_ : Optional[int] , a_ : Union[str, Any] , a_ : int )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.num_choices
SCREAMING_SNAKE_CASE__ : Any = MPNetForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase( self : int , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : List[str] , a_ : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = MPNetForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = True
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = MPNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a_ )
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ )
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a_ )
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
@slow
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
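# A hedged end-to-end sketch mirroring the integration test above; it downloads
# the public checkpoint, so it needs network access on first run.
from transformers import AutoTokenizer

def mpnet_embed_demo(text: str = "Hello world"):
    tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    outputs = model(**tokenizer(text, return_tensors="pt"))
    return outputs.last_hidden_state  # shape: (1, seq_len, 768)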
| 85 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = GPT2Tokenizer
rust_tokenizer_class = GPT2TokenizerFast
test_rust_tokenizer = True
from_pretrained_kwargs = {"add_prefix_space": True}
test_seq2seq = False
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPT2Tokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer"
return input_text, output_text
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : Tuple = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
# Simple input
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : int = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$"
SCREAMING_SNAKE_CASE__ : List[str] = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this."
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please."
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
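# A self-contained reading of the byte-level BPE fixtures above: "\u0120" (Ġ)
# marks a leading space, and the merges file drives how characters fuse into
# tokens. With the same toy vocab/merges written in setUp:
#
#     tokenizer = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
#     tokenizer.tokenize("lower newer", add_prefix_space=True)
#     # -> ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
#
# " low" merges via "\u0120 l" -> "\u0120l o" -> "\u0120lo w", while " newer"
# only has the "e r" merge available, so it falls back to single characters.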
| 663 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ):
"""simple docstring"""
A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + user_agent
return ua
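# A typical value produced above (version numbers are illustrative):
# "diffusers/0.16.1; python/3.10.11; session_id/6f2c...; torch/2.0.1; is_ci/true"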
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if token is None:
A_ = HfFolder.get_token()
if organization is None:
A_ = whoami(__UpperCamelCase )["name"]
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ):
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]:
return
A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None
A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase )
A_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None
        ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betab=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,)
A_ = os.path.join(args.output_dir ,"README.md" )
model_card.save(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
A_ = str(Path(__UpperCamelCase ).as_posix() )
A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase )
if search is None:
return None
A_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None
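# Sketch of the expected behaviour: a resolved cache path such as
#   .../models--runwayml--stable-diffusion-v1-5/snapshots/<40-char-sha>/unet/config.json
# yields "<40-char-sha>", while a path without a snapshots/<hash>/ segment returns None.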
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__a :Optional[int] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
__a :Union[str, Any] = os.path.join(hf_cache_home, 'diffusers')
def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if new_cache_dir is None:
A_ = DIFFUSERS_CACHE
if old_cache_dir is None:
A_ = old_diffusers_cache
A_ = Path(__UpperCamelCase ).expanduser()
A_ = Path(__UpperCamelCase ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase )
new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase )
os.replace(__UpperCamelCase ,__UpperCamelCase )
try:
os.symlink(__UpperCamelCase ,__UpperCamelCase )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__a :str = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
__a :Dict = 0
else:
with open(cache_version_file) as f:
try:
__a :str = int(f.read())
except ValueError:
__a :List[str] = 0
if cache_version < 1:
__a :Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
__a :List[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if variant is not None:
A_ = weights_name.split("." )
A_ = splits[:-1] + [variant] + splits[-1:]
A_ = ".".join(__UpperCamelCase )
return weights_name
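# Examples of the renaming above (a sketch, not an exhaustive spec):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant(weights_name, None) returns weights_name unchanged.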
def __snake_case ( __UpperCamelCase : Any ,*,
__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple=None ,):
"""simple docstring"""
A_ = str(__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__UpperCamelCase ):
if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ):
# Load from a PyTorch checkpoint
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ):
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" )
):
try:
A_ = hf_hub_download(
__UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,)
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,)
try:
# 2. Load model file as usual
A_ = hf_hub_download(
__UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
                f'''containing a file named {weights_name}''' )
 | 86 |
from functools import lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> set:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(SCREAMING_SNAKE_CASE__ )
if n > 1:
factors.add(SCREAMING_SNAKE_CASE__ )
return factors
@lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(SCREAMING_SNAKE_CASE__ ) )
def _a ( SCREAMING_SNAKE_CASE__ : list ) -> bool:
'''simple docstring'''
return len(set(SCREAMING_SNAKE_CASE__ ) ) in (0, 1)
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 2
while True:
# Increment each value of a generated range
SCREAMING_SNAKE_CASE__ : List[str] = [base + i for i in range(SCREAMING_SNAKE_CASE__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
SCREAMING_SNAKE_CASE__ : Tuple = [upf_len(SCREAMING_SNAKE_CASE__ ) for x in group]
checker.append(SCREAMING_SNAKE_CASE__ )
# If all numbers in the list are equal, return the group variable.
if equality(SCREAMING_SNAKE_CASE__ ):
return group
# Increment our base variable by 1
base += 1
def _a ( SCREAMING_SNAKE_CASE__ : int = 4 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = run(SCREAMING_SNAKE_CASE__ )
return results[0] if len(SCREAMING_SNAKE_CASE__ ) else None
if __name__ == "__main__":
print(solution())
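# Intended behaviour (Project Euler 47): with the default window size of 4, the
# search should return 134043, the first of four consecutive integers
# (134043..134046) that each have exactly four distinct prime factors.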
| 663 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ = 100 ) -> int:
"""simple docstring"""
A__ = 0
A__ = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
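# Closed-form cross-check: sum(1..n) = n*(n+1)/2 and the sum of squares is
# n*(n+1)*(2n+1)/6, so for n = 100 the difference is
# 5050**2 - 338350 = 25164150 (the classic Project Euler 6 result).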
| 87 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : int = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> Dict:
"""simple docstring"""
pass
@slow
@require_torch
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
pass
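# Minimal usage sketch of the pipeline exercised above (checkpoint and labels
# are taken from the tests; the asserted scores are model-specific and not
# guaranteed for other audio inputs):
#
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])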
| 663 | 0 |
"""simple docstring"""
import argparse
import copy
def _snake_case ( __snake_case : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = {}
with open(__snake_case ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCamelCase : Optional[Any] = []
_list.append([line.split()[1], line.split()[2]] )
_lowerCamelCase : int = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCamelCase : str = []
_list.append([line.split()[0], line.split()[2]] )
_lowerCamelCase : Any = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _snake_case ( __snake_case : List[Any] , __snake_case : int ):
"""simple docstring"""
with open(__snake_case ) as f:
_lowerCamelCase : str = f.read(1 )
_lowerCamelCase : Dict = start_node
_lowerCamelCase : Tuple = []
_lowerCamelCase : int = start_node
_lowerCamelCase : Tuple = 0
while visiting not in first_solution:
_lowerCamelCase : int = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__snake_case ) and k[0] not in first_solution:
_lowerCamelCase : List[Any] = k[1]
_lowerCamelCase : Any = k[0]
first_solution.append(__snake_case )
_lowerCamelCase : Optional[int] = distance_of_first_solution + int(__snake_case )
_lowerCamelCase : Any = best_node
first_solution.append(__snake_case )
_lowerCamelCase : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCamelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _snake_case ( __snake_case : List[Any] , __snake_case : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = []
for n in solution[1:-1]:
_lowerCamelCase : Tuple = solution.index(__snake_case )
for kn in solution[1:-1]:
_lowerCamelCase : Union[str, Any] = solution.index(__snake_case )
if n == kn:
continue
_lowerCamelCase : Optional[int] = copy.deepcopy(__snake_case )
_lowerCamelCase : Any = kn
_lowerCamelCase : str = n
_lowerCamelCase : List[str] = 0
for k in _tmp[:-1]:
_lowerCamelCase : List[Any] = _tmp[_tmp.index(__snake_case ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCamelCase : str = distance + int(i[1] )
_tmp.append(__snake_case )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCamelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __snake_case : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _snake_case ( __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = 1
_lowerCamelCase : str = first_solution
_lowerCamelCase : int = []
_lowerCamelCase : str = distance_of_first_solution
_lowerCamelCase : int = solution
while count <= iters:
_lowerCamelCase : Optional[Any] = find_neighborhood(__snake_case , __snake_case )
_lowerCamelCase : Tuple = 0
_lowerCamelCase : List[Any] = neighborhood[index_of_best_solution]
_lowerCamelCase : List[str] = len(__snake_case ) - 1
_lowerCamelCase : Optional[int] = False
while not found:
_lowerCamelCase : str = 0
while i < len(__snake_case ):
if best_solution[i] != solution[i]:
_lowerCamelCase : Optional[Any] = best_solution[i]
_lowerCamelCase : List[str] = solution[i]
break
_lowerCamelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Any = best_solution[:-1]
_lowerCamelCase : Dict = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCamelCase : Tuple = cost
_lowerCamelCase : Optional[Any] = solution
else:
_lowerCamelCase : Dict = index_of_best_solution + 1
_lowerCamelCase : Union[str, Any] = neighborhood[index_of_best_solution]
if len(__snake_case ) >= size:
tabu_list.pop(0 )
_lowerCamelCase : Any = count + 1
return best_solution_ever, best_cost
def _snake_case ( __snake_case : Any=None ):
"""simple docstring"""
_lowerCamelCase : Dict = generate_neighbours(args.File )
_lowerCamelCase , _lowerCamelCase : Any = generate_first_solution(
args.File , __snake_case )
_lowerCamelCase , _lowerCamelCase : List[str] = tabu_search(
__snake_case , __snake_case , __snake_case , args.Iterations , args.Size , )
print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
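# The parser above assumes one weighted edge per line, with single-character
# node names (generate_first_solution picks the start node via f.read(1)), e.g.:
#
#   a b 20
#   a c 18
#   b c 10
#
# A typical invocation (the script name is illustrative; the flags match the
# argparse definitions above): python tabu_search.py -f edges.txt -i 100 -s 5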
| 88 |
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can converge faster but uses more memory.
_lowerCamelCase : List[str] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase : Any = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase : str = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = len([g for position, g in enumerate(SCREAMING_SNAKE_CASE__ ) if g == main_target[position]] )
return (item, float(SCREAMING_SNAKE_CASE__ ))
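# Example of the scoring above: evaluating "Helxo" against the target "Hello"
# yields ("Helxo", 4.0), since four of the five positions match.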
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
SCREAMING_SNAKE_CASE__ : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE__ : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = list(SCREAMING_SNAKE_CASE__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE__ : Tuple = random.choice(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : tuple[str, float] , SCREAMING_SNAKE_CASE__ : list[tuple[str, float]] , SCREAMING_SNAKE_CASE__ : list[str] , ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE__ : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE__ : Tuple = 10 if child_n >= 10 else child_n
for _ in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = population_score[random.randint(0 , SCREAMING_SNAKE_CASE__ )][0]
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = crossover(parent_a[0] , SCREAMING_SNAKE_CASE__ )
# Append new string to the population list.
pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
return pop
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] , SCREAMING_SNAKE_CASE__ : bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE__ : str = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
    # Verify that the target contains no genes besides the ones in the genes list.
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE__ : Dict = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Generate random starting population.
SCREAMING_SNAKE_CASE__ : List[Any] = []
for _ in range(SCREAMING_SNAKE_CASE__ ):
population.append("".join([random.choice(SCREAMING_SNAKE_CASE__ ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE__ : int = [evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE__ : List[str] = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE__ : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(SCREAMING_SNAKE_CASE__ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
(item, score / len(SCREAMING_SNAKE_CASE__ )) for item, score in population_score
]
# This is selection
for i in range(SCREAMING_SNAKE_CASE__ ):
population.extend(select(population_score[int(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(SCREAMING_SNAKE_CASE__ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase : Dict = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
_lowerCamelCase : Tuple = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 663 | 0 |
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> float:
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
_lowercase : Any = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_lowercase : Any = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
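# Worked example of the formula above: principal = 25000, rate_per_annum = 0.12
# (0.01 per month) and years_to_repay = 3 (36 monthly payments) gives
# 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36 per month.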
| 89 |
from collections.abc import Callable
import numpy as np
def _a ( SCREAMING_SNAKE_CASE__ : Callable , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = int(np.ceil((x_end - xa) / step_size ) )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__ : Tuple = ya
SCREAMING_SNAKE_CASE__ : Dict = xa
for k in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
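# Usage sketch for the integrator above (an explicit Euler step, y_{k+1} = y_k
# + h * f(x_k, y_k)): for y' = y with y(0) = 1 on [0, 1] and step 0.01, the
# final value is (1 + 0.01)**100 ≈ 2.7048, approaching e ≈ 2.7183 as the step shrinks.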
| 663 | 0 |
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A , A ) -> float:
lowerCAmelCase__ = sorted(numsa + numsa )
lowerCAmelCase__ , lowerCAmelCase__ = divmod(len(A ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = [float(x) for x in input('''Enter the elements of first array: ''').split()]
__UpperCAmelCase = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""") | 90 |
def _a ( SCREAMING_SNAKE_CASE__ : List[Any]=2_81_23 ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
SCREAMING_SNAKE_CASE__ : int = set()
SCREAMING_SNAKE_CASE__ : Any = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(SCREAMING_SNAKE_CASE__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
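# Intended result (Project Euler 23): with the default limit of 28123, the
# analytical bound above which every integer is expressible as a sum of two
# abundant numbers, the sum of all non-expressible positive integers is 4179871.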
| 663 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = '''owlvit_text_model'''
def __init__( self : Optional[Any] ,A_ : List[str]=4_9408 ,A_ : Optional[int]=512 ,A_ : Dict=2048 ,A_ : List[str]=12 ,A_ : Optional[Any]=8 ,A_ : List[Any]=16 ,A_ : List[str]="quick_gelu" ,A_ : int=1e-5 ,A_ : int=0.0 ,A_ : List[str]=0.02 ,A_ : Tuple=1.0 ,A_ : Union[str, Any]=0 ,A_ : Tuple=4_9406 ,A_ : Optional[int]=4_9407 ,**A_ : int ,) -> Union[str, Any]:
super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size
A = intermediate_size
A = num_hidden_layers
A = num_attention_heads
A = max_position_embeddings
A = hidden_act
A = layer_norm_eps
A = attention_dropout
A = initializer_range
A = initializer_factor
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,A_ : Union[str, os.PathLike] ,**A_ : List[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
A , A = cls.get_config_dict(A_ ,**A_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
A = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ ,**A_ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = '''owlvit_vision_model'''
def __init__( self : int ,A_ : List[str]=768 ,A_ : Dict=3072 ,A_ : List[str]=12 ,A_ : int=12 ,A_ : Optional[Any]=3 ,A_ : Optional[int]=768 ,A_ : List[str]=32 ,A_ : Union[str, Any]="quick_gelu" ,A_ : Optional[int]=1e-5 ,A_ : Optional[Any]=0.0 ,A_ : Tuple=0.02 ,A_ : Optional[int]=1.0 ,**A_ : List[Any] ,) -> Optional[int]:
super().__init__(**A_ )
A = hidden_size
A = intermediate_size
A = num_hidden_layers
A = num_attention_heads
A = num_channels
A = image_size
A = patch_size
A = hidden_act
A = layer_norm_eps
A = attention_dropout
A = initializer_range
A = initializer_factor
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,A_ : Union[str, os.PathLike] ,**A_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
A , A = cls.get_config_dict(A_ ,**A_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ ,**A_ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''owlvit'''
_lowerCamelCase: List[str] = True
def __init__( self : List[str] ,A_ : int=None ,A_ : str=None ,A_ : Tuple=512 ,A_ : int=2.65_92 ,A_ : Optional[Any]=True ,**A_ : Any ,) -> Any:
super().__init__(**A_ )
if text_config is None:
A = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
A = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
A = OwlViTTextConfig(**A_ )
A = OwlViTVisionConfig(**A_ )
A = projection_dim
A = logit_scale_init_value
A = return_dict
A = 1.0
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,A_ : Union[str, os.PathLike] ,**A_ : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
A , A = cls.get_config_dict(A_ ,**A_ )
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ ,**A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Dict ,A_ : Dict ,**A_ : List[Any] ) -> Optional[int]:
A = {}
A = text_config
A = vision_config
return cls.from_dict(A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = copy.deepcopy(self.__dict__ )
A = self.text_config.to_dict()
A = self.vision_config.to_dict()
A = self.__class__.model_type
return output
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : "ProcessorMixin" ,A_ : int = -1 ,A_ : int = -1 ,A_ : Optional["TensorType"] = None ,) -> Mapping[str, Any]:
A = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=A_ ,seq_length=A_ ,framework=A_ )
A = super().generate_dummy_inputs(
processor.image_processor ,batch_size=A_ ,framework=A_ )
return {**text_input_dict, **image_input_dict}
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
        return 14
 | 91 |
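# Notes on the OnnxConfig above (property names follow the transformers
# OnnxConfig API): input and output axes are dynamic (batch/sequence for text,
# batch/channels/height/width for pixel values), exported outputs are validated
# against the PyTorch reference with an absolute tolerance of 1e-4, and the
# default ONNX opset is 14.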
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ['''MobileViTFeatureExtractor''']
_lowerCamelCase : List[str] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCamelCase_ = {
"""facebook/blenderbot_small-90M""": 512,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = BlenderbotSmallTokenizer
def __init__( self : Tuple , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : List[str]="<|endoftext|>" , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[Any]=True , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : int =add_prefix_space
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple=None ):
'''simple docstring'''
lowercase : Any =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
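# Token layout intended by the first method above (build_inputs_with_special_tokens
# in the original API): a single sequence becomes [bos] + ids + [eos]; a pair
# becomes [bos] + ids_a + [eos] + [eos] + ids_b + [eos].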
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 92 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = BlenderbotSmallConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : Tuple = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCAmelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
UpperCAmelCase_ = "facebook/blenderbot_small-90M"
@cached_property
def A_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
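# The slow test above follows the usual seq2seq smoke-test recipe: tokenize the
# prompt, call model.generate(..., num_beams=2), batch_decode with special
# tokens skipped, then assert the decoded text is one of a small set of
# known-good strings (the set absorbs minor nondeterminism across versions).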
| 663 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__A = """bart"""
__A = True
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def __A () ->Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
lowerCAmelCase__ :Tuple = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
lowerCAmelCase__ :Union[str, Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
lowerCAmelCase__ :List[Any] = qar_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
lowerCAmelCase__ :Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
lowerCAmelCase__ :List[str] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
lowerCAmelCase__ :Tuple = sas_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def __A () ->Optional[int]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
lowerCAmelCase__ :Optional[int] = faiss.StandardGpuResources()
lowerCAmelCase__ :Union[str, Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
lowerCAmelCase__ :Optional[Any] = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
lowerCAmelCase__ :List[Any] = faiss.IndexFlatIP(128 )
lowerCAmelCase__ :Union[str, Any] = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
lowerCAmelCase__ , lowerCAmelCase__ :str = (None, None)
lowerCAmelCase__ :Optional[Any] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def __A () ->int:
"""simple docstring"""
lowerCAmelCase__ :int = datasets.load_dataset('eli5' , name='LFQA_reddit' )
lowerCAmelCase__ :int = elia['train_eli5']
lowerCAmelCase__ :int = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
lowerCAmelCase__ :Optional[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
__A , __A , __A = load_indexes()
__A , __A , __A , __A = load_models()
__A , __A = load_train_data()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :str = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ :int = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="wiki40b" , _SCREAMING_SNAKE_CASE="dense" , _SCREAMING_SNAKE_CASE=10 ) ->List[str]:
"""simple docstring"""
if source == "none":
lowerCAmelCase__ , lowerCAmelCase__ :str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase__ , lowerCAmelCase__ :str = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ , lowerCAmelCase__ :int = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :Optional[int] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
lowerCAmelCase__ :List[Any] = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.9_5 , _SCREAMING_SNAKE_CASE=0.8 ) ->Any:
"""simple docstring"""
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__A = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
    show_passages = show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True
__A = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
__A = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__A = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
__A = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
__A = """wiki40b"""
__A = """dense"""
__A = """beam"""
__A = 2
__A = 64
__A = 256
__A = None
__A = None
__A = st.sidebar.checkbox("""Generation options""")
if generate_options:
__A = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__A = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__A = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
__A , __A = make_support(question, source=wiki_source, method="""dense""", n_results=10)
__A , __A = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
__A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
__A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
__A = res[1].strip()
if sec_titles == "":
__A = """[{}]({})""".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
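Taken together, the helpers above implement a retrieve-then-read loop: make_support builds a question+context document and answer_question runs the BART generator over it. A minimal sketch of the same flow outside Streamlit, assuming the functions and loaded models defined above are in scope (the question string is only an example):

question = "How do people make chocolate?"  # example query
question_doc, support_list = make_support(question, source="wiki40b", method="dense", n_results=10)
answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2)
print(answer)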
| 93 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest ):
"""simple docstring"""
scheduler_classes = (DPMSolverSDEScheduler,)
num_inference_steps = 10
def get_scheduler_config( self, **kwargs ):
"""simple docstring"""
config = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**kwargs )
return config
def test_timesteps( self ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
def test_schedules( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule )
def test_prediction_type( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_full_loop_no_noise( self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample, t )
model_output = model(sample, t )
output = scheduler.step(model_output, t, sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def test_full_loop_with_v_prediction( self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample, t )
model_output = model(sample, t )
output = scheduler.step(model_output, t, sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def test_full_loop_device( self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps, device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t )
model_output = model(sample, t )
output = scheduler.step(model_output, t, sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def test_full_loop_device_karras_sigmas( self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps, device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t )
model_output = model(sample, t )
output = scheduler.step(model_output, t, sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 663 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
_import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
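The guarded try/except blocks above only register names; _LazyModule then defers the heavy imports until an attribute is first touched. A toy illustration of the same mechanism (this is not the transformers implementation, just a sketch of the idea):

import importlib
import types

class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):  # only hit when normal lookup fails
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = DemoLazyModule("demo", {"math": ["sqrt"]})
print(lazy.sqrt(4.0))  # "math" is imported only at this point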
| 94 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin ):
"""simple docstring"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", FutureWarning, )
feature_extractor = kwargs.pop("feature_extractor" )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(image_processor, tokenizer )
self.current_processor = self.image_processor
self._in_target_context_manager = False
def __call__( self, *args, **kwargs ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs )
images = kwargs.pop("images", None )
text = kwargs.pop("text", None )
if len(args ) > 0:
images = args[0]
args = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
inputs = self.image_processor(images, *args, **kwargs )
if text is not None:
encodings = self.tokenizer(text, **kwargs )
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def batch_decode( self, *args, **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args, **kwargs )
def decode( self, *args, **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args, **kwargs )
@contextmanager
def as_target_processor( self ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False
def token2json( self, tokens, is_inner_value=False, added_vocab=None ):
"""simple docstring"""
if added_vocab is None:
added_vocab = self.tokenizer.get_added_vocab()
output = {}
while tokens:
start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE )
if start_token is None:
break
key = start_token.group(1 )
end_token = re.search(rF'''</s_{key}>''', tokens, re.IGNORECASE )
start_token = start_token.group()
if end_token is None:
tokens = tokens.replace(start_token, "" )
else:
end_token = end_token.group()
start_token_escaped = re.escape(start_token )
end_token_escaped = re.escape(end_token )
content = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE )
if content is not None:
content = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab )
if value:
if len(value ) == 1:
value = value[0]
output[key] = value
else: # leaf nodes
output[key] = []
for leaf in content.split(r"<sep/>" ):
leaf = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
leaf = leaf[1:-2] # for categorical special tokens
output[key].append(leaf )
if len(output[key] ) == 1:
output[key] = output[key][0]
tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
if len(output ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def feature_extractor_class( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
return self.image_processor_class
@property
def feature_extractor( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
return self.image_processor
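For orientation, token2json parses Donut-style tag sequences into nested Python structures. A hypothetical input/output pair (the tag names are invented and no processor is constructed here):

tokens = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
# processor.token2json(tokens) would be expected to return:
#   {"menu": {"name": "Latte", "price": "4.50"}}
# and leaf values separated by <sep/> come back as lists.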
| 663 | 0 |
"""simple docstring"""
class EditDistance:
def __init__( self ) -> None:
self.worda = ""
self.wordb = ""
self.dp = []
def __min_dist_top_down_dp( self, m: int, n: int ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.wordb[n]:
self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1 )
else:
insert = self.__min_dist_top_down_dp(m, n - 1 )
delete = self.__min_dist_top_down_dp(m - 1, n )
replace = self.__min_dist_top_down_dp(m - 1, n - 1 )
self.dp[m][n] = 1 + min(insert, delete, replace )
return self.dp[m][n]
def min_dist_top_down( self, worda: str, wordb: str ) -> int:
self.worda = worda
self.wordb = wordb
self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
return self.__min_dist_top_down_dp(len(worda ) - 1, len(wordb ) - 1 )
def min_dist_bottom_up( self, worda: str, wordb: str ) -> int:
self.worda = worda
self.wordb = wordb
m = len(worda )
n = len(wordb )
self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
self.dp[i][j] = j
elif j == 0: # second string is empty
self.dp[i][j] = i
elif worda[i - 1] == wordb[j - 1]: # last characters are equal
self.dp[i][j] = self.dp[i - 1][j - 1]
else:
insert = self.dp[i][j - 1]
delete = self.dp[i - 1][j]
replace = self.dp[i - 1][j - 1]
self.dp[i][j] = 1 + min(insert, delete, replace )
return self.dp[m][n]
if __name__ == "__main__":
solver = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
Sa = input('''Enter the first string: ''').strip()
Sb = input('''Enter the second string: ''').strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
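A quick self-check of both implementations ("kitten" -> "sitting" is the textbook case: substitute k->s, substitute e->i, insert g):

checker = EditDistance()
assert checker.min_dist_top_down("kitten", "sitting") == 3
assert checker.min_dist_bottom_up("kitten", "sitting") == 3
assert checker.min_dist_bottom_up("", "abc") == 3  # three insertions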
| 95 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
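Downstream code typically guards on the same optional dependency before touching the tokenizer; a short sketch (the checkpoint name mirrors the one used in this module's vocab map):

from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers import BartphoTokenizer
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
else:
    print("Install sentencepiece to use BartphoTokenizer.")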
| 663 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
model_type = "levit"
def __init__( self, image_size=2_2_4, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=1_6, hidden_sizes=[1_2_8, 2_5_6, 3_8_4], num_attention_heads=[4, 8, 1_2], depths=[4, 4, 4], key_dim=[1_6, 1_6, 1_6], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ):
super().__init__(**kwargs )
self.image_size = image_size
self.num_channels = num_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.hidden_sizes = hidden_sizes
self.num_attention_heads = num_attention_heads
self.depths = depths
self.key_dim = key_dim
self.drop_path_rate = drop_path_rate
self.patch_size = patch_size
self.attention_ratio = attention_ratio
self.mlp_ratio = mlp_ratio
self.initializer_range = initializer_range
self.down_ops = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class LevitOnnxConfig( OnnxConfig ):
torch_onnx_minimum_version = version.parse("1.11" )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation( self ) -> float:
return 1E-4
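Example usage with the parameter names reconstructed above; down_ops is derived from key_dim and hidden_sizes, and serialization comes from PretrainedConfig:

config = LevitConfig(hidden_sizes=[64, 128, 256], num_attention_heads=[2, 4, 8])
print(config.down_ops[0])            # ['Subsample', 16, 4, 4, 2, 2] with the default key_dim
print(config.to_json_string()[:60])  # round-trips like any PretrainedConfig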
| 96 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
def __init__( self, parent, batch_size=1_3, image_size=3_2, patch_size=2, num_channels=3, embed_dim=1_6, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=1_0, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def create_and_check_model( self, config, pixel_values, labels ):
"""simple docstring"""
model = MaskFormerSwinModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def create_and_check_backbone( self, config, pixel_values, labels ):
"""simple docstring"""
model = MaskFormerSwinBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(ValueError ):
config.out_features = ["stem"]
model = MaskFormerSwinBackbone(config=config )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
fx_compatible = False
test_torchscript = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
"""simple docstring"""
self.model_tester = MaskFormerSwinModelTester(self )
self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def test_multi_gpu_data_parallel_forward( self ):
"""simple docstring"""
pass
def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_backbone( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("Swin does not use inputs_embeds" )
def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
def test_model_common_attributes( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear ) )
def test_forward_signature( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def test_attention_outputs( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def check_hidden_states_output( self, inputs_dict, config, model_class, image_size ):
"""simple docstring"""
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ), expected_num_layers )
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def test_hidden_states_output( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
def test_hidden_states_output_with_padding( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def test_model_from_pretrained( self ):
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def test_model_outputs_equivalence( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={} ):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs )
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs ).to_tuple()
def recursive_check(tuple_object, dict_object ):
if isinstance(tuple_object, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object ):
recursive_check(tuple_iterable_value, dict_iterable_value )
elif isinstance(tuple_object, Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(tuple_iterable_value, dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object ), set_nan_tensor_to_zero(dict_object ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'''
F''' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'''
), )
recursive_check(tuple_output, dict_output )
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class )
dict_inputs = self._prepare_for_class(inputs_dict, model_class )
check_equivalence(model, tuple_inputs, dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
check_equivalence(model, tuple_inputs, dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict, model_class )
dict_inputs = self._prepare_for_class(inputs_dict, model_class )
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True} )
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin ):
"""simple docstring"""
all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
config_class = MaskFormerSwinConfig
def setUp( self ):
"""simple docstring"""
self.model_tester = MaskFormerSwinModelTester(self )
# Overriding as returned hidden states are tuples of tensors instead of a single tensor
def test_backbone_outputs( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict, output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict, output_attentions=True )
self.assertIsNotNone(outputs.attentions )
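A hedged sketch of what the backbone tests above exercise: a randomly initialized backbone maps pixel values to one feature map per requested stage (weights are random, so only the shapes are meaningful):

import torch
from transformers import MaskFormerSwinConfig, MaskFormerSwinBackbone

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, config.num_channels, 224, 224))
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    print(tuple(feature_map.shape), n_channels)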
| 663 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str ):
'''simple docstring'''
return "".join(sorted(word ) )
def anagram( my_word: str ):
'''simple docstring'''
return word_by_signature[signature(my_word )]
__a = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a = sorted({word.strip().lower() for word in data.splitlines()})
__a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
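A small usage example for the helpers above that does not depend on words.txt:

sample_words = ["post", "pots", "stop", "tops", "cat"]
sample_index = collections.defaultdict(list)
for w in sample_words:
    sample_index[signature(w)].append(w)
print(sample_index["opst"])  # ['post', 'pots', 'stop', 'tops']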
| 97 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
# TODO: upload to AWS
_lowerCamelCase : str = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig ):
"""simple docstring"""
model_type = "retribert"
def __init__( self, vocab_size=3_0_5_2_2, hidden_size=7_6_8, num_hidden_layers=8, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, share_encoders=True, projection_dim=1_2_8, pad_token_id=0, **kwargs, ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id, **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.share_encoders = share_encoders
self.projection_dim = projection_dim
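With the reconstructed names above, the config behaves like any PretrainedConfig and round-trips through plain dicts:

config = RetriBertConfig(projection_dim=256, share_encoders=False)
restored = RetriBertConfig.from_dict(config.to_dict())
assert restored.projection_dim == 256 and restored.share_encoders is False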
| 663 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
def enable_attention_slicing( self, slice_size: Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ):
'''simple docstring'''
self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, text_embeddings: Optional[torch.FloatTensor] = None, **kwargs, ):
'''simple docstring'''
if isinstance(prompt , str ):
batch_size = 1
elif isinstance(prompt , list ):
batch_size = len(prompt )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(callback_steps )}.""" )
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed , seq_len , _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = ['''''']
elif type(prompt ) is not type(negative_prompt ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
f""" {type(prompt )}.""" )
elif isinstance(negative_prompt , str ):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
latents_reference = torch.randn(
latents_shape_reference , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(self.device )
latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
self.device )
else:
latents_reference = torch.randn(
latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
latents_reference = latents_reference.to(self.device )
latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
dx = (latents_shape[3] - latents_shape_reference[3]) // 2
dy = (latents_shape[2] - latents_shape_reference[2]) // 2
w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
nw = 0 if dx < 0 else dx
nh = 0 if dy < 0 else dy
dx = max(-dx , 0 )
dy = max(-dy , 0 )
# import pdb
# pdb.set_trace()
latents[:, :, nh : nh + h, nw : nw + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
_UpperCamelCase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2 )
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
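# scale the latents back by the VAE scaling factor (1 / 0.18215) before decoding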
_UpperCamelCase = 1 / 0.18215 * latents
_UpperCamelCase = self.vae.decode(lowerCAmelCase__ ).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_UpperCamelCase = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase__ ) , return_tensors='''pt''' ).to(
self.device )
_UpperCamelCase , _UpperCamelCase = self.safety_checker(
images=lowerCAmelCase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_UpperCamelCase = None
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
| 98 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCamelCase : int = False
@skip_mps
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase_ = False
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A_ ( cls : str ) -> Union[str, Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : Tuple ) -> str:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = "cpu"
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 6_4, 6_4, 3) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase, 1E-3 )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A_ ( self : Any ) -> str:
"""simple docstring"""
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def A_ ( cls : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 )
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses"
SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7]
SCREAMING_SNAKE_CASE__ : str = pipe(
prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs(expected_image - image ).max() < 5E-1
| 663 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a (lowerCAmelCase__ ):
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_5_5.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
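# preprocess() returns a 1xCxHxW float tensor scaled from [0, 255] to [-1, 1], the input
# range the VQ-VAE expects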
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , __A , __A , __A , ):
super().__init__()
self.register_modules(vqvae=__A , unet=__A , scheduler=__A )
@torch.no_grad()
def __call__( self , __A = None , __A = 1 , __A = 100 , __A = 0.0 , __A = None , __A = "pil" , __A = True , ):
if isinstance(__A , PIL.Image.Image ):
__a = 1
elif isinstance(__A , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__A )}''' )
if isinstance(__A , PIL.Image.Image ):
__a = preprocess(__A )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(__A , generator=__A , device=self.device , dtype=__A )
__a = image.to(device=self.device , dtype=__A )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__A , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(__A ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
__a = self.unet(__A , __A ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(__A ).sample
__a = torch.clamp(__A , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
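# Minimal usage sketch (assumes this pipeline is exported as LDMSuperResolutionPipeline
# and that the "CompVis/ldm-super-resolution-4x-openimages" checkpoint is available; both
# are assumptions, not established by this file):
#
#   import PIL.Image
#   from diffusers import LDMSuperResolutionPipeline
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("input.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")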
| 99 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = PegasusConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : Dict = pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : int = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ = "google/pegasus-xsum"
@cached_property
def A_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase )
assert self.expected_text == generated_words
def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def A_ ( self : List[Any] ) -> Any:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 663 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : str = CustomTokenizer
pass
| 100 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
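# A minimal sketch of the lazy-import pattern used above (illustrative only, not the real
# transformers implementation): attribute access triggers the submodule import on first
# use, so importing the package stays cheap.
#
#   import importlib
#   import types
#
#   class LazyModuleSketch(types.ModuleType):  # hypothetical name
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map each exported name to the submodule that defines it
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, item):
#           submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
#           return getattr(submodule, item)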
| 663 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : int =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """mobilenet_v2"""
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1.0 , lowerCAmelCase__=8 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=3_2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="relu6" , lowerCAmelCase__=True , lowerCAmelCase__=0.8 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.001 , lowerCAmelCase__=2_5_5 , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_ : int = image_size
SCREAMING_SNAKE_CASE_ : int = depth_multiplier
SCREAMING_SNAKE_CASE_ : Tuple = depth_divisible_by
SCREAMING_SNAKE_CASE_ : int = min_depth
SCREAMING_SNAKE_CASE_ : Dict = expand_ratio
SCREAMING_SNAKE_CASE_ : Any = output_stride
SCREAMING_SNAKE_CASE_ : str = first_layer_is_expansion
SCREAMING_SNAKE_CASE_ : Any = finegrained_output
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = tf_padding
SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[Any] = semantic_loss_ignore_index
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-4
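# Minimal usage sketch (assumes a transformers install; checkpoint ids come from the
# archive map at the top of this file):
#
#   from transformers import MobileNetV2Config, MobileNetV2Model
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   model = MobileNetV2Model(config)  # randomly initialised weights from this config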
| 101 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fca.bias
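# (fairseq's TransformerSentenceEncoderLayer exposes the intermediate projection as fc1
# and the output projection as fc2)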
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCamelCase : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 663 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : int = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase : Dict = """FlavaImageProcessor"""
__lowerCAmelCase : List[str] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
UpperCamelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
UpperCamelCase : Union[str, Any] = kwargs.pop("""feature_extractor""" )
UpperCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_A , _A )
UpperCamelCase : List[str] = self.image_processor
def __call__( self , _A = None , _A = None , _A = True , _A = False , _A = False , _A = None , _A = 0 , _A = None , _A = None , _A = None , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCamelCase : List[Any] = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_token_type_ids=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
if images is not None:
UpperCamelCase : int = self.image_processor(
_A , return_image_mask=_A , return_codebook_pixels=_A , return_tensors=_A , **_A , )
if text is not None and images is not None:
encoding.update(_A )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def _a ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _a ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
UpperCamelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
@property
def _a ( self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _A , )
return self.image_processor
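# Minimal usage sketch (checkpoint id is an assumption; any FLAVA repo that ships both a
# tokenizer and an image processor works):
#
#   from PIL import Image
#   from transformers import FlavaProcessor
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")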
| 102 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "Wav2Vec2FeatureExtractor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
@classmethod
def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
try:
return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: ", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase )
def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" )
else:
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
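# both audio and text were provided: the tokenized text ids become the "labels" entry
# of the feature-extractor output before it is returned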
SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"]
return inputs
def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Dict = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if labels is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"]
return input_features
def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : int = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
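# A* over a 4-connected grid: "cell" below serves as the open list, re-sorted each
# iteration so the entry with the lowest f = g + h is expanded next, while "closed"
# marks cells that have already been expanded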
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> tuple[list[list[int]], list[list[int]]]:
_snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase_ ) )
] # the reference grid
_snake_case = 1
_snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase_ ) )
] # the action grid
_snake_case = init[0]
_snake_case = init[1]
_snake_case = 0
_snake_case = g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
_snake_case = [[f, g, x, y]]
_snake_case = False # flag that is set when search is complete
_snake_case = False # flag set if the search cannot expand any further
while not found and not resign:
if len(lowerCAmelCase_ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
_snake_case = cell.pop()
_snake_case = next_cell[2]
_snake_case = next_cell[3]
_snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
_snake_case = True
else:
for i in range(len(lowerCAmelCase_ ) ): # to try out different valid actions
_snake_case = x + DIRECTIONS[i][0]
_snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowerCAmelCase_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_snake_case = g + cost
_snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_snake_case = 1
_snake_case = i
_snake_case = []
_snake_case = goal[0]
_snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_snake_case = x - DIRECTIONS[action[x][y]][0]
_snake_case = y - DIRECTIONS[action[x][y]][1]
_snake_case = xa
_snake_case = ya
invpath.append([x, y] )
_snake_case = []
for i in range(len(lowerCAmelCase_ ) ):
path.append(invpath[len(lowerCAmelCase_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case = [0, 0]
# all coordinates are given in format [y,x]
snake_case = [len(grid) - 1, len(grid[0]) - 1]
snake_case = 1
# the cost map which pushes the path closer to the goal
snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case = 9_9
snake_case , snake_case = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 103 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 663 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> str:
"""simple docstring"""
A__ = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> dict[str, str]:
"""simple docstring"""
A__ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
A__ = remove_duplicates(key.upper() )
A__ = len(UpperCAmelCase_ )
# First fill cipher with key characters
A__ = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(UpperCAmelCase_ ), 26 ):
A__ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
A__ = alphabet[i - offset]
A__ = char
return cipher_alphabet
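# example: create_cipher_map("zebra") yields A->Z, B->E, C->B, D->R, E->A from the key,
# then maps the remaining plaintext letters to the unused alphabet letters in order
# (F->C, G->D, H->F, I->G, ...), skipping letters already consumed by the key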
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : dict[str, str] ) -> str:
"""simple docstring"""
return "".join(cipher_map.get(UpperCAmelCase_, UpperCAmelCase_ ) for ch in message.upper() )
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : dict[str, str] ) -> str:
"""simple docstring"""
A__ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(UpperCAmelCase_, UpperCAmelCase_ ) for ch in message.upper() )
def _lowerCamelCase ( ) -> None:
"""simple docstring"""
A__ = input("Enter message to encode or decode: " ).strip()
A__ = input("Enter keyword: " ).strip()
A__ = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
A__ = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
A__ = create_cipher_map(UpperCAmelCase_ )
print(func(UpperCAmelCase_, UpperCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[str] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
_lowerCamelCase : Dict = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
_lowerCamelCase : Optional[Any] = {'''vinai/bartpho-syllable''': 1_0_2_4}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : Dict = cnt
cnt += 1
with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0]
SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids )
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int, _UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A_ ( self : List[str], _UpperCAmelCase : str ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
    def A_ ( self : Tuple, save_directory : str, filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(token )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
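# Hedged usage sketch for the tokenizer above. The checkpoint name is the one
# listed in the pretrained map of this file; the Vietnamese sample sentence and
# the save directory are illustrative assumptions only:
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   tokenizer.save_vocabulary("./bartpho-vocab")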
| 663 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Tuple = ["image_processor", "tokenizer"]
__a : str = "LayoutLMv3ImageProcessor"
__a : Optional[int] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' ,FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor ,tokenizer )
    def __call__( self ,images ,text = None ,text_pair = None ,boxes = None ,word_labels = None ,add_special_tokens = True ,padding = False ,truncation = None ,max_length = None ,stride = 0 ,pad_to_multiple_of = None ,return_token_type_ids = None ,return_attention_mask = None ,return_overflowing_tokens = False ,return_special_tokens_mask = False ,return_offsets_mapping = False ,return_length = False ,verbose = True ,return_tensors = None ,**kwargs ,):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        # first, apply the image processor
        features = self.image_processor(images=images ,return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text ,str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] ,text_pair=text_pair ,boxes=boxes if boxes is not None else features['boxes'] ,word_labels=word_labels ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_token_type_ids=return_token_type_ids ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images ,encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images( self ,images ,overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                F' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,FutureWarning ,)
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,FutureWarning ,)
        return self.image_processor
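# Hedged usage sketch for the processor above. The checkpoint name, the image
# path, and the reliance on Tesseract OCR (via `apply_ocr=True`) are
# assumptions, not requirements of this file:
#
#   from PIL import Image
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   # encoding now holds input_ids, attention_mask, bbox and pixel_values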
| 105 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _a ( vectors : list , noofclusters : int ) -> Optional[Any]:
    '''simple docstring'''
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" , [dim] )
        vb = tf.placeholder("float" , [dim] )
        # `tf.sub` was renamed to `tf.subtract` in TF 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()  # `tf.initialize_all_variables` is deprecated
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 1_00
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
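# Hedged usage sketch for the TF 1.x k-means routine above (kept under its
# original name `_a`); the toy data is an assumption, and a TF 1.x runtime
# (tf.Session, tf.placeholder) is required:
#
#   if __name__ == "__main__":
#       data = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]])
#       centroids, assignments = _a(data, 2)
#       print(centroids, assignments)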
| 663 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
A_ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A_ : Dict = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A_ : int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline( self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['polics', 'health'] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , examples ):
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?' , ['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier(
            'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('' , candidate_labels='politics' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='politics' )
        with self.assertRaises(ValueError ):
            classifier('Who are you voting for in 2020?' , candidate_labels='' )
        with self.assertRaises(TypeError ):
            classifier('Who are you voting for in 2020?' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
        with self.assertRaises(AttributeError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
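    # Hedged usage sketch of the pipeline these tests exercise, using the same
    # checkpoint as the slow tests below:
    #
    #   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    #   classifier("Who are you voting for in 2020?",
    #              candidate_labels=["politics", "public health", "science"])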
    def run_entailment_id( self , zero_shot_classifier : Pipeline ):
        config = zero_shot_classifier.model.config
        original_labelaid = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_labelaid
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __UpperCamelCase ( self : int ) -> Dict:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __UpperCamelCase ( self : List[str] ) -> Any:
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
| 106 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
_lowerCamelCase : List[str] = None
_lowerCamelCase : Union[str, Any] = {
'''7B''': 1_1_0_0_8,
'''13B''': 1_3_8_2_4,
'''30B''': 1_7_9_2_0,
'''65B''': 2_2_0_1_6,
'''70B''': 2_8_6_7_2,
}
_lowerCamelCase : Optional[Any] = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size( n : int , ffn_dim_multiplier : float = 1 , multiple_of : int = 2_56 ) -> int:
    '''simple docstring'''
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json( path : str ) -> Optional[int]:
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )
def write_json( text , path : str ) -> None:
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(text , f )
def write_model( model_path , input_base_path , model_size , safe_serialization=True ) -> int:
    '''simple docstring'''
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , "tmp" )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , "params.json" ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 1_0_0_0_0.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w , n_heads=n_heads , dima=dim , dimb=dim ):
        return w.view(n_heads , dima // n_heads // 2 , 2 , dimb ).transpose(1 , 2 ).reshape(dima , dimb )
    print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path , "consolidated.00.pth" ) , map_location="cpu" )
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path , f'''consolidated.{i:02d}.pth''' ) , map_location="cpu" )
            for i in range(num_shards )
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers ):
        filename = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
                f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
                f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
                f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
                f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[f'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards )] , dim=1 )
            state_dict[f'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards )] , dim=0 )
            state_dict[f'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards )] , dim=1 )
            state_dict[f'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards )] , dim=0 )
        state_dict[f'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    filename = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards )] , dim=1 ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards )] , dim=0 ),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict , os.path.join(tmp_model_path , "pytorch_model.bin.index.json" ) )
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 2_56
    config = LlamaConfig(
        hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=num_key_value_heads , )
    config.save_pretrained(tmp_model_path )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model." )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format." )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer( tokenizer_path , input_tokenizer_path ) -> int:
    '''simple docstring'''
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main() -> Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
parser.add_argument(
"--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
parser.add_argument(
"--output_dir" , help="Location to write HF model and tokenizer" , )
parser.add_argument("--safe_serialization" , type=SCREAMING_SNAKE_CASE__ , help="Whether or not to save using `safetensors`." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(args.input_dir , "tokenizer.model" )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
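# Hedged CLI sketch for the converter above; the script filename and the paths
# are assumptions, while the flags mirror the argparse definition in main():
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir ./llama-7b-hf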
| 663 | 0 |
'''simple docstring'''
import argparse
import datetime
def zeller( date_input : str ) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 1_1:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 1_3:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 3_2:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 4_5 < y < 8_5_0_0:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 1_2
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = F'Your date {date_input}, is a {days[str(f )]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
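# Hedged usage sketch; the module filename is an assumption, the date format
# matches the argparse help above (January 31, 2010 was indeed a Sunday):
#
#   $ python zellers_congruence.py 01-31-2010
#   Your date 01-31-2010, is a Sunday!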
| 107 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = GPTaTokenizer
UpperCAmelCase_ = GPTaTokenizerFast
UpperCAmelCase_ = True
UpperCAmelCase_ = {"add_prefix_space": True}
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file, "w", encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer"
return input_text, output_text
def A_ ( self : int ) -> Dict:
"""simple docstring"""
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
# Simple input
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : int = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$"
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this."
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please."
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
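# Hedged usage sketch: these unittest classes are meant to be collected by
# pytest; the file path below is an assumption about where this module lives
# inside the transformers repository:
#
#   python -m pytest tests/models/gpt2/test_tokenization_gpt2.py -k "padding"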
| 663 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ) -> Union[str, Any]:
    config = SwinConfig(image_size=1_9_2 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    elif "large" in model_name:
        window_size = 1_2
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    else:
        raise ValueError("""Model not supported, only supports base and large variants""" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ) -> List[Any]:
    if "encoder.mask_token" in name:
        name = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
    if "encoder.patch_embed.proj" in name:
        name = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "encoder.patch_embed.norm" in name:
        name = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if name == "encoder.norm.weight":
        name = """layernorm.weight"""
    if name == "encoder.norm.bias":
        name = """layernorm.bias"""
    if "decoder" in name:
        pass
    else:
        name = """swin.""" + name
    return name
def convert_state_dict( orig_state_dict , model ) -> int:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""] = val[
                    :dim
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ) -> int:
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = ViTImageProcessor(size={"""height""": 1_9_2, """width""": 1_9_2} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(f"""microsoft/{model_name}""" )
        image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
__a: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__a: int = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 108 |
from functools import lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> set:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(SCREAMING_SNAKE_CASE__ )
if n > 1:
factors.add(SCREAMING_SNAKE_CASE__ )
return factors
@lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(SCREAMING_SNAKE_CASE__ ) )
def equality(iterable: list) -> bool:
    """Return True if all numbers in the list are equal (vacuously true when empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first n consecutive integers that each have n unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
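# Worked example (added for illustration): 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and
# 646 = 2 * 17 * 19 each have three distinct prime factors and are consecutive, so
# solution(3) returns 644; these are the numbers quoted in Project Euler problem 47.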
if __name__ == "__main__":
print(solution())
| 663 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
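# Note (added for clarity): the MAPPING table above translates fairseq parameter
# prefixes into the HF Hubert module tree; the "*" wildcard stands for the encoder
# layer index and is filled in at load time by recursively_load_weights below.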
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "encoder.layers.0.attention.k_proj") to the module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq Hubert weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 109 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 663 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 665 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase : List[str] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase : Any = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase : str = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting positions where it matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
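# Illustrative example (added for clarity): with parent_1 = "abcd", parent_2 = "wxyz"
# and random_slice = 2, crossover returns the children "abyz" and "wxcd".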
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Breed up to 10 mutated children from parent_1, proportionally to its fitness."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a string matching the target is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""")
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    genes_list = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 663 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Stores the configuration of an MMBT-style model: copies a text config and adds multimodal fields."""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 600 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
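    # Minimal usage sketch (added for illustration): integrate dy/dx = y from x = 0
    # to x = 1 with y(0) = 1; explicit Euler slightly underestimates e ~= 2.718.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # approximately 2.70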
| 663 | 0 |
import numpy as np
class Cell:
    """A single search node: grid position, parent link and A* scores g, h, f."""
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid world backed by a numpy array."""
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neighbours(self, cell):
        """Return the in-bounds (8-connected) neighbours of the given cell."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from start to goal over the grid, with f = g + h."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # Expand the queued cell with the lowest f score.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip nodes that have already been expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared Euclidean distance to the goal as the heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Do not re-queue a node that is already open with a better score.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 121 |
def solution(limit=28123):
    """Sum all positive integers <= limit that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
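# Sanity note (added for illustration): 12 is the smallest abundant number, since its
# proper divisors sum to 1 + 2 + 3 + 4 + 6 = 16 > 12, and every integer above 28123
# can be written as the sum of two abundant numbers, which bounds the search above.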
if __name__ == "__main__":
print(solution())
| 663 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 127 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 663 | 0 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    """Three fixed priority levels (0 is highest); each level holds at most 100 items."""
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """The element value itself is the priority; smaller values dequeue first."""
    def __init__(self):
        self.queue = []
    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    fpq = FixedPriorityQueue()
fpq.enqueue(0, 10 )
fpq.enqueue(1, 70 )
fpq.enqueue(0, 100 )
fpq.enqueue(2, 1 )
fpq.enqueue(2, 5 )
fpq.enqueue(1, 7 )
fpq.enqueue(2, 4 )
fpq.enqueue(1, 64 )
fpq.enqueue(0, 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 131 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder token is always attended to; the rest follow the pad mask.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 663 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 688 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def A_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
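# A minimal sketch of the sampling loop these tests exercise, outside the
# unittest harness (`my_denoiser` is a placeholder assumption, not a diffusers
# API; every scheduler call below appears in the tests above):
#
#   scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
#   scheduler.set_timesteps(10)
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       sample = scheduler.step(my_denoiser(model_input, t), t, sample).prev_sample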
| 663 | 0 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
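# Quick worked example (illustrative only): sigmoid_derivative expects the
# *output* of sigmoid, i.e. sigma'(x) = sigma(x) * (1 - sigma(x)). For an
# activation of 0.5 it returns 0.5 * (1 - 0.5) = 0.25, the logistic curve's
# maximum slope.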
def example() -> int:
    # Input values (three-bit truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 486 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
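# A minimal usage sketch of `token2json` (the tag names below are illustrative
# assumptions; output keys simply follow the <s_key>...</s_key> tags in the
# decoded token string):
#
#   processor.token2json("<s_menu><s_name> burger </s_name></s_menu>")
#   # -> {"menu": {"name": "burger"}}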
| 663 | 0 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
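# Behaviour sketch (inferred from `requires_backends`, not spelled out in this
# file): instantiating either placeholder class without the speech extras
# installed raises an ImportError that names the missing backend.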
| 462 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
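# Note on the pattern above (an inference from the structure, not documented
# here): importing the package stays cheap because `BartphoTokenizer` is only
# materialised on first attribute access through the `_LazyModule` proxy.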
| 663 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
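# Worked example of the rounding above (the image size is an illustrative
# assumption): with the default size_divisor of 32, a 65 x 97 image maps to
# (65 // 32 * 32, 97 // 32 * 32) == (64, 96) before rescaling by 1 / 255.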
| 590 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
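    # Shape check above, worked through with this tester's defaults (for
    # illustration only): image_size=32 and patch_size=2 give (32 // 2) ** 2 =
    # 256 patches; with len(depths) == 3, the two merge stages divide by
    # 4 ** 2 = 16, leaving a sequence length of 16, while the hidden dimension
    # grows to embed_dim * 2 ** 2 = 64.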
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 663 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
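# For reference, the info file fetched in TestDatasetOnHfGcp resolves to
# f"{HF_GCP_BASE_URL}/{builder._relative_data_dir(with_hash=False)}/dataset_info.json"
# (assuming config.DATASET_INFO_FILENAME keeps its usual "dataset_info.json" value).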
| 344 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
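# Minimal usage sketch (values are the defaults from the signature above):
#
#   config = RetriBertConfig(projection_dim=128)
#   assert config.model_type == "retribert" and config.share_encoders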
| 663 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider",
            "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
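    # Why [14, 15, 10, 9, 3, 2, 15, 19]: ids follow the order of `vocab` above,
    # so "\u0120low" -> 14, "er" -> 15, "\u0120" -> 10, "n" -> 9, "e" -> 3,
    # "w" -> 2, "er" -> 15 again, and "<unk>" -> 19.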
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
| 633 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCamelCase : int = False
@skip_mps
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase_ = False
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A_ ( cls : str ) -> Union[str, Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : Tuple ) -> str:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
    def test_cpu_offload_forward_pass(self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def test_inference_batch_consistent(self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4 )
    def test_dict_tuple_outputs_equivalent(self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_pt_np_pil_outputs_equivalent(self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def test_save_load_local(self ):
        super().test_save_load_local(expected_max_difference=5e-4 )
    def test_save_load_optional_components(self ):
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase ):
    @classmethod
    def setUpClass(cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass(cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self ):
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 )
        pipe.to("cuda" )
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max() ) < 5e-1
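def _example_attend_and_excite_usage():
    # Added usage sketch (assumes a CUDA GPU and Hub access; not part of the
    # original test file). `token_indices` names the prompt tokens whose
    # cross-attention maps the pipeline strengthens while denoising, so both
    # subjects actually appear in the image.
    pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ).to("cuda" )
    generator = torch.Generator("cuda" ).manual_seed(0 )
    image = pipe(
        prompt="a cat and a frog", token_indices=[2, 5], guidance_scale=7.5, generator=generator, num_inference_steps=50, max_iter_to_alter=25, ).images[0]
    return image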
| 663 | 0 |
'''simple docstring'''
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
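# Added round-trip example (not part of the original module): every letter maps
# to a 5-character A/B group, so decode() simply consumes each coded word five
# characters at a time.
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello world")) == "hello world"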
if __name__ == "__main__":
from doctest import testmod
testmod()
| 665 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self, config_class=PegasusConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase ):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model(self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation(self ):
        self._assert_generated_batch_equal_expected()
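def _example_pegasus_summarization():
    # Added usage sketch (requires TF and Hub access; not part of the original
    # tests): two-beam search with the checkpoint above produces the
    # one-sentence XSum-style summaries asserted in `expected_text`.
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum" )
    model = TFAutoModelForSeqaSeqLM.from_pretrained("google/pegasus-xsum" )
    batch = tokenizer(["PG&E stated it scheduled the blackouts in response to forecasts for high winds."], padding=True, return_tensors="tf" )
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2 )
    return tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True )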
| 663 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        raise NotImplementedError
    def default_hp_space(self , trial ):
        raise NotImplementedError
    def ensure_available(self ):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
    @classmethod
    def pip_install(cls ):
        return f'`pip install {cls.pip_package or cls.name}`'
class OptunaBackend(HyperParamSearchBackendBase ):
    name = """optuna"""
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = """ray"""
    pip_package = """'ray[tune]'"""
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    name = """sigopt"""
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    name = """wandb"""
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'{len(available_backends )} hyperparameter search backends available. Using {name} as the default.' )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            f' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
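def _example_pick_backend():
    # Added usage sketch (not part of the original module): Trainer consults
    # default_hp_search_backend() when no `backend=` is passed to
    # `trainer.hyperparameter_search`; the first installed backend wins, in the
    # registration order optuna -> ray -> sigopt -> wandb.
    backend_name = default_hp_search_backend()
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name )]()
    backend.ensure_available()
    return backend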
| 600 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
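# Added note (not part of the original file): because the module object above is
# replaced by a _LazyModule, the heavy torch/TF submodules are only imported on
# first attribute access, e.g. (assuming transformers is installed):
#
#     from transformers.models.tapas import TapasConfig  # triggers the real import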
| 663 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 121 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCamelCase : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
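# Example invocation (added for illustration; the paths are hypothetical, the
# flags are the ones defined by the argument parser above):
#
#     python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#         --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#         --pytorch_dump_folder_path /path/to/output \
#         --classification_head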
| 663 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
    graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
        sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
##These nodes will assign the centroid Variables the appropriate
##values
        centroid_value = tf.placeholder('''float64''' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
##These nodes will assign an assignment Variable the appropriate
##value
        assignment_value = tf.placeholder('''int32''' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
##Node for computing Euclidean distances
# Placeholders for input
        va = tf.placeholder('''float''' , [dim] )
        vb = tf.placeholder('''float''' , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.initialize_all_variables()
# Initialize all variables
        sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
# Now use the cluster assignment node, with the distances
# as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
# Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
# Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
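if __name__ == "__main__":
    # Added smoke test (not part of the original module; requires an old TF 1.x
    # API, e.g. tf.Session, tf.placeholder, tf.sub, tf.initialize_all_variables):
    # cluster 20 random 2-D points into 3 groups.
    import numpy as np
    demo_vectors = np.random.rand(20 , 2 ).astype("float64" )
    demo_centroids , demo_assignments = tf_k_means_cluster(demo_vectors , 3 )
    print(demo_centroids , demo_assignments )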
| 127 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin ):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self, feature_extractor, tokenizer ):
        super().__init__(feature_extractor, tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs )
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer )
    def __call__( self, *args, **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio", None )
        sampling_rate = kwargs.pop("sampling_rate", None )
        text = kwargs.pop("text", None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad( self, *args, **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs )
        input_features = kwargs.pop("input_features", None )
        labels = kwargs.pop("labels", None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode( self, *args, **kwargs ):
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        return self.tokenizer.decode(*args, **kwargs )
@contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
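def _example_processor_round_trip():
    # Added usage sketch (checkpoint name and dummy audio are illustrative, not
    # from the original module): passing `audio` and `text` in one call routes
    # the waveform through the feature extractor, the transcript through the
    # tokenizer, and attaches the tokenized transcript as `labels`.
    import numpy as np
    processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
    speech = np.zeros(16000, dtype=np.float32 )  # one second of silence
    batch = processor(audio=speech, text="HELLO WORLD", sampling_rate=16000, return_tensors="np" )
    return batch["input_values"].shape, batch["labels"]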
| 663 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self , value ) -> List[str]:
        raise NotImplementedError()
    def end(self ) -> List[str]:
        raise NotImplementedError()
class TextStreamer(BaseStreamer ):
    def __init__(self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , **decode_kwargs ) -> int:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self , value ) -> str:
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end(self ) -> Optional[int]:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text(self , text: str , stream_end: bool = False ) -> Optional[Any]:
        print(text , flush=True , end="" if not stream_end else None )
    def _is_chinese_char(self , cp ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer ):
    def __init__(self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , timeout: Optional[float] = None , **decode_kwargs ) -> List[str]:
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self , text: str , stream_end: bool = False ) -> Union[str, Any]:
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self ) -> Tuple:
        return self
    def __next__(self ) -> List[str]:
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
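def _example_stream_generation():
    # Added usage sketch (model name illustrative; not part of the original
    # module): TextIteratorStreamer is consumed from the calling thread while
    # `generate()` pushes decoded chunks into the queue from a worker thread.
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("gpt2" )
    model = AutoModelForCausalLM.from_pretrained("gpt2" )
    streamer = TextIteratorStreamer(tokenizer , skip_prompt=True )
    inputs = tokenizer("A list of colors:" , return_tensors="pt" )
    thread = Thread(target=model.generate , kwargs=dict(**inputs , streamer=streamer , max_new_tokens=20 ) )
    thread.start()
    return "".join(chunk for chunk in streamer )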
| 131 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 663 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformer2DModel( nn.Module ):
    """simple docstring"""
    def __init__( self : int , num_attention_heads: int = 1_6 , attention_head_dim: int = 8_8 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 3_2 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ) -> Tuple:
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ) -> Dict:
        '''simple docstring'''
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
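# Added note (not part of the original module): pipelines that combine a text
# and an image condition, such as Versatile Diffusion, re-balance the two
# streams at inference time by overwriting the attributes initialized above:
#
#     model.mix_ratio = 0.7                        # weight transformer 1's output more
#     model.condition_lengths = [77, 257]          # text tokens, then image tokens
#     model.transformer_index_for_condition = [1, 0]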
| 688 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1_0_2_4}
class BartphoTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8" ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self, d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self, text: str ) -> List[str]:
        return self.sp_model.encode(text, out_type=str )
    def _convert_token_to_id( self, token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self, index ):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self, tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE, " " ).strip()
        return out_string
return out_string
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
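# Illustration (added; the class and file names below are assumptions for the sketch): because
# __getstate__ stashes the serialized SentencePiece proto and __setstate__ rebuilds the processor
# from it, a loaded tokenizer survives a pickle round trip:
#
#     import pickle
#     tok = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")  # hypothetical local files
#     restored = pickle.loads(pickle.dumps(tok))
#     assert restored.sp_model.serialized_model_proto() == tok.sp_model.serialized_model_proto()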
| 663 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
'''7B''': 1_1_0_0_8,
'''13B''': 1_3_8_2_4,
'''30B''': 1_7_9_2_0,
'''65B''': 2_2_0_1_6,
'''70B''': 2_8_6_7_2,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
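# Worked check (added for illustration): LLaMA-7B has dim = 4096, so int(8 * 4096 / 3) = 10922,
# and rounding up to the next multiple of 256 gives 11008 -- the 7B entry in
# INTERMEDIATE_SIZE_MAP above.
assert compute_intermediate_size(4096) == 11008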
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
            state_dict = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
        state_dict = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
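# Usage sketch (added; the paths and script file name are hypothetical, but the flags match the
# argparse definitions above):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights \
#         --model_size 7B \
#         --output_dir /path/to/hf/model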
| 486 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """Cluster `vectors` (an n x k array) into `noofclusters` groups with K-Means."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
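# Minimal usage sketch (added; the toy data is illustrative, not from the original module). Note
# this implementation relies on the legacy TensorFlow 1.x graph API (tf.placeholder, tf.Session),
# so it will not run under TensorFlow 2.
if __name__ == "__main__":
    toy_vectors = array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1], [8.0, 8.0], [8.2, 7.9], [7.8, 8.1]])
    centroids, assignments = TFKMeansCluster(toy_vectors, 2)
    print("Centroids:", centroids)
    print("Assignments:", assignments)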
| 663 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 462 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 590 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
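# Illustrative note (added; mirrors the toy vocab written in setUp): byte-level BPE marks a leading
# space with "\u0120" (rendered as Ġ), which is why "lower newer" tokenizes to
# ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] in test_full_tokenizer above.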
| 663 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
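# Usage sketch (added; the image path is hypothetical and the BLIP checkpoint is downloaded on
# first use):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     print(tool(Image.open("photo_of_a_cat.png")))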
| 344 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
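# Worked example (from the Project Euler 47 statement): 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and
# 646 = 2 * 17 * 19 each have three distinct prime factors, so run(3)[0] == 644; solution()
# answers the analogous four-factor question.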
| 663 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count the tile amounts t <= t_limit that can form at least one and at most
    n_limit distinct hollow square laminae (t = outer_width^2 - hole_width^2).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
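# Worked example (added): a 7x7 outer square with a centred 3x3 hole is a hollow square lamina
# using 7 * 7 - 3 * 3 = 40 tiles, so it contributes one tally to count[40] in solution() above.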
| 633 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 663 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ : Optional[int] = prepare_semantic_single_inputs()
A_ : Any = image_processing(_UpperCAmelCase ,_UpperCAmelCase ,return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
        image_processing.do_reduce_labels = True  # attribute name assumed; background id 0 becomes the ignore index 255
A_ : Tuple = image_processing(_UpperCAmelCase ,_UpperCAmelCase ,return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 665 |
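A minimal sketch of the label-reduction convention the tests above exercise: the background id 0 is mapped to the ignore index 255 and every other class id shifts down by one, which is why reduced labels still fit in [0, 255]. The helper below is illustrative only, not the processor's actual implementation; numpy is the only assumption.
import numpy as np

def reduce_labels(segmentation_map: np.ndarray) -> np.ndarray:
    # Hypothetical helper: ADE20k-style ids 1..150 become 0..149, and the
    # background id 0 becomes the ignore index 255.
    reduced = segmentation_map.astype(np.int64) - 1
    reduced[reduced == -1] = 255
    return reduced

print(reduce_labels(np.array([[0, 1], [150, 75]])))  # [[255 0] [149 74]]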
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
N_POPULATION = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    '''simple docstring'''
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    '''simple docstring'''
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    '''simple docstring'''
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_a: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_00) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new strings to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE__ : str = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
SCREAMING_SNAKE_CASE__ : Dict = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 663 | 0 |
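As a compact usage sketch of the evolutionary loop above, the snippet below scores a tiny random population and applies truncation selection; all names here are chosen for the example rather than taken from the module.
import random

def fitness(item: str, target: str) -> float:
    # Number of positions that already match the target string.
    return float(sum(a == b for a, b in zip(item, target)))

target = "HELLO"
genes = list("HELO")
population = ["".join(random.choice(genes) for _ in range(len(target))) for _ in range(10)]
ranked = sorted(population, key=lambda item: fitness(item, target), reverse=True)
print(ranked[0], fitness(ranked[0], target))  # best candidate and its score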
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 600 |
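The `_LazyModule` registration above defers heavy submodule imports until an attribute is first accessed. Below is a generic sketch of the same idea built only on the standard library; the class is illustrative and is not the transformers implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    # Map exported names to the module that defines them; import on demand.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Unknown names raise KeyError here; real code would raise AttributeError.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("lazy", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # the json module is only imported on this line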
from collections.abc import Callable
import numpy as np
def _a ( SCREAMING_SNAKE_CASE__ : Callable , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = int(np.ceil((x_end - xa) / step_size ) )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__ : Tuple = ya
SCREAMING_SNAKE_CASE__ : Dict = xa
for k in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 663 | 0 |
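A readable restatement of the stepper above, plus a quick accuracy check; the names (explicit_euler, y0, x0) are chosen for this example.
import numpy as np

def explicit_euler(ode_func, y0: float, x0: float, x_end: float, step_size: float) -> np.ndarray:
    # Forward Euler: y_{k+1} = y_k + h * f(x_k, y_k).
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

# y' = y with y(0) = 1 has exact solution e^x; Euler undershoots slightly.
print(explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1])  # ~2.7048 vs e = 2.71828...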
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaPriorPipeline
__A = ['''prompt''']
__A = ['''prompt''', '''negative_prompt''']
__A = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
return 32
@property
def UpperCamelCase ( self : List[Any] ) -> str:
return 32
@property
def UpperCamelCase ( self : str ) -> Optional[int]:
return self.time_input_dim
@property
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : Tuple ) -> int:
return 1_00
@property
def UpperCamelCase ( self : Dict ) -> Any:
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase ( self : str ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_ = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
UpperCAmelCase_ = PriorTransformer(**_UpperCAmelCase )
        # clip_std and clip_mean are initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
UpperCAmelCase_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
UpperCAmelCase_ = CLIPVisionModelWithProjection(_UpperCAmelCase )
return model
@property
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , )
return image_processor
def UpperCamelCase ( self : str ) -> Tuple:
UpperCAmelCase_ = self.dummy_prior
UpperCAmelCase_ = self.dummy_image_encoder
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = self.dummy_tokenizer
UpperCAmelCase_ = self.dummy_image_processor
UpperCAmelCase_ = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_UpperCAmelCase , clip_sample_range=10.0 , )
UpperCAmelCase_ = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def UpperCamelCase ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=0 ) -> Optional[int]:
if str(_UpperCAmelCase ).startswith('''mps''' ):
UpperCAmelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase_ = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCamelCase ( self : Any ) -> List[Any]:
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_UpperCAmelCase )
UpperCAmelCase_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
UpperCAmelCase_ = output.image_embeds
UpperCAmelCase_ = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
UpperCAmelCase_ = image[0, -10:]
UpperCAmelCase_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
UpperCAmelCase_ = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ = torch_device == "cpu"
UpperCAmelCase_ = True
UpperCAmelCase_ = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
@skip_mps
def UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase_ = torch_device == "cpu"
UpperCAmelCase_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
| 121 |
def _a ( SCREAMING_SNAKE_CASE__ : List[Any]=2_81_23 ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
SCREAMING_SNAKE_CASE__ : int = set()
SCREAMING_SNAKE_CASE__ : Any = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(SCREAMING_SNAKE_CASE__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 663 | 0 |
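A brute-force cross-check of the abundance predicate that the divisor-sum sieve above computes, with a tiny limit for illustration.
def proper_divisor_sum(n: int) -> int:
    # O(n) reference implementation, only for validating the sieve.
    return sum(d for d in range(1, n) if n % d == 0)

abundant = [n for n in range(1, 49) if proper_divisor_sum(n) > n]
print(abundant)  # [12, 18, 20, 24, 30, 36, 40, 42, 48]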
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__: List[Any] = re.compile(r"\s+")
def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
return {"hash": hashlib.mda(re.sub(SCREAMING_SNAKE_CASE__ , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def snake_case_ ( _lowerCAmelCase : int ) -> Dict:
UpperCAmelCase : Optional[int] = [len(SCREAMING_SNAKE_CASE__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(SCREAMING_SNAKE_CASE__ ), "line_max": max(SCREAMING_SNAKE_CASE__ )}
def snake_case_ ( _lowerCAmelCase : str ) -> Tuple:
UpperCAmelCase : Optional[Any] = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ) -> Any:
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str]=5 ) -> int:
UpperCAmelCase : Tuple = ["auto-generated", "autogenerated", "automatically generated"]
UpperCAmelCase : int = example["content"].splitlines()
for _, line in zip(range(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : str=5 , _lowerCAmelCase : Union[str, Any]=0.0_5 ) -> Any:
UpperCAmelCase : Any = ["unit tests", "test file", "configuration file"]
UpperCAmelCase : Tuple = example["content"].splitlines()
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = 0
# first test
for _, line in zip(range(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
UpperCAmelCase : Any = example["content"].count('''\n''' )
UpperCAmelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : List[Any] = ["def ", "class ", "for ", "while "]
UpperCAmelCase : List[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]=4 ) -> Optional[Any]:
UpperCAmelCase : str = example["content"].splitlines()
UpperCAmelCase : List[str] = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def snake_case_ ( _lowerCAmelCase : Any ) -> int:
UpperCAmelCase : Union[str, Any] = tokenizer(example['''content'''] , truncation=SCREAMING_SNAKE_CASE__ )["input_ids"]
UpperCAmelCase : List[str] = len(example['''content'''] ) / len(SCREAMING_SNAKE_CASE__ )
return {"ratio": ratio}
def snake_case_ ( _lowerCAmelCase : Any ) -> str:
UpperCAmelCase : Dict = {}
results.update(get_hash(SCREAMING_SNAKE_CASE__ ) )
results.update(line_stats(SCREAMING_SNAKE_CASE__ ) )
results.update(alpha_stats(SCREAMING_SNAKE_CASE__ ) )
results.update(char_token_ratio(SCREAMING_SNAKE_CASE__ ) )
results.update(is_autogenerated(SCREAMING_SNAKE_CASE__ ) )
results.update(is_config_or_test(SCREAMING_SNAKE_CASE__ ) )
results.update(has_no_keywords(SCREAMING_SNAKE_CASE__ ) )
results.update(has_few_assignments(SCREAMING_SNAKE_CASE__ ) )
return results
def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Optional[int]:
if not check_uniques(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def snake_case_ ( _lowerCAmelCase : Any ) -> Dict:
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f_in:
with gzip.open(str(SCREAMING_SNAKE_CASE__ ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
os.unlink(SCREAMING_SNAKE_CASE__ )
# Settings
UpperCamelCase__: Tuple = HfArgumentParser(PreprocessingArguments)
UpperCamelCase__: int = parser.parse_args()
if args.num_workers is None:
UpperCamelCase__: Optional[int] = multiprocessing.cpu_count()
UpperCamelCase__: Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCamelCase__: Dict = time.time()
UpperCamelCase__: Union[str, Any] = load_dataset(args.dataset_name, split="train")
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
UpperCamelCase__: Union[str, Any] = time.time()
UpperCamelCase__: Dict = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
UpperCamelCase__: Optional[Any] = set(ds.unique("hash"))
UpperCamelCase__: Any = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
UpperCamelCase__: Optional[int] = time.time()
UpperCamelCase__: Union[str, Any] = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCamelCase__: Dict = time.time()
UpperCamelCase__: Optional[int] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
UpperCamelCase__: List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCamelCase__: Union[str, Any] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
UpperCamelCase__: int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCamelCase__: Tuple = str(data_dir / F"file-{file_number+1:012}.json")
UpperCamelCase__: Dict = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
| 127 |
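The first stage of the pipeline above keys exact deduplication on an md5 of whitespace-stripped content; here is the same step in isolation (the document list and variable names are made up for the example):
import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Strip whitespace first so trivially reformatted copies collide.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

docs = ["def f():\n    return 1", "def f():  return 1", "def g(): return 2"]
seen, unique = set(), []
for doc in docs:
    digest = content_hash(doc)
    if digest not in seen:
        seen.add(digest)
        unique.append(doc)
print(len(unique))  # 2: the first two docs differ only in whitespace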
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ['''MobileViTFeatureExtractor''']
_lowerCamelCase : List[str] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List[str]=1_024, lowerCamelCase__ : Optional[int]=1_024, lowerCamelCase__ : List[str]=False, **lowerCamelCase__ : Union[str, Any] ):
_a = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_a = SeqaSeqDataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, type_path="train", **SCREAMING_SNAKE_CASE__ )
_a = tok.pad_token_id
def get_lens(lowerCamelCase__ : Tuple ):
_a = tqdm(
DataLoader(SCREAMING_SNAKE_CASE__, batch_size=512, num_workers=8, shuffle=SCREAMING_SNAKE_CASE__, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
_a = []
for batch in dl:
_a = batch["input_ids"].ne(SCREAMING_SNAKE_CASE__ ).sum(1 ).tolist()
_a = batch["labels"].ne(SCREAMING_SNAKE_CASE__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
max_lens.append(max(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) )
else:
max_lens.extend(SCREAMING_SNAKE_CASE__ )
return max_lens
_a = get_lens(SCREAMING_SNAKE_CASE__ )
_a = SeqaSeqDataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, type_path="val", **SCREAMING_SNAKE_CASE__ )
_a = get_lens(SCREAMING_SNAKE_CASE__ )
pickle_save(SCREAMING_SNAKE_CASE__, train_ds.len_file )
pickle_save(SCREAMING_SNAKE_CASE__, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 131 |
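The measurement inside get_lens above boils down to a per-row count of non-pad tokens; here is that one step in isolation with plain torch (the pad id and batch are made up):
import torch

pad_token_id = 0
batch = torch.tensor([[5, 6, 7, 0, 0], [5, 0, 0, 0, 0]])
print(batch.ne(pad_token_id).sum(1).tolist())  # [3, 1]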
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = BlenderbotSmallConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : Tuple = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size )
        SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
        SCREAMING_SNAKE_CASE__ : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCAmelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
UpperCAmelCase_ = "facebook/blenderbot_small-90M"
@cached_property
def A_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 663 | 0 |
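The past-key-values check above compares a cached forward pass against a full one on a random slice of the last positions. Here is the comparison pattern itself, with numpy arrays standing in for the two model outputs (they are made identical here, which a real test only expects up to tolerance):
import numpy as np

rng = np.random.default_rng(0)
output_from_no_past = rng.normal(size=(2, 8, 16))  # logits for the full sequence
output_from_past = output_from_no_past[:, -3:, :]  # stand-in for the cached pass
idx = int(rng.integers(0, 16))  # compare one random feature slice
np.testing.assert_allclose(
    output_from_no_past[:, -3:, idx], output_from_past[:, :, idx], rtol=1e-3
)
print("cache-consistent")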
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE = namedtuple('covid_data', 'cases deaths recovered')
def __a ( lowerCAmelCase__ : str = "https://www.worldometers.info/coronavirus/" ):
a__ : Optional[int] = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE__ ).content ).xpath(SCREAMING_SNAKE_CASE__ ) )
__SCREAMING_SNAKE_CASE = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 688 |
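A minimal offline check of the XPath expression used above, run against an inline HTML string instead of the live page:
from lxml import html

page = html.fromstring(
    '<div><div class="maincounter-number"><span>1,234</span></div>'
    '<div class="maincounter-number"><span>56</span></div></div>'
)
print(page.xpath('//div[@class = "maincounter-number"]/span/text()'))  # ['1,234', '56']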
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = (DPMSolverSDEScheduler,)
UpperCAmelCase_ = 10
def A_ ( self : List[str], **_UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_UpperCAmelCase )
return config
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def A_ ( self : int ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : int = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Dict = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def A_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 663 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A ( __lowerCamelCase ):
__UpperCAmelCase : Optional[Any] = 'vivit'
def __init__(self : Optional[Any] , __UpperCAmelCase : Any=2_2_4 , __UpperCAmelCase : Any=3_2 , __UpperCAmelCase : str=[2, 1_6, 1_6] , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : int=7_6_8 , __UpperCAmelCase : Tuple=1_2 , __UpperCAmelCase : int=1_2 , __UpperCAmelCase : Dict=3_0_7_2 , __UpperCAmelCase : Optional[int]="gelu_fast" , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : str=1E-06 , __UpperCAmelCase : Dict=True , **__UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_frames
UpperCAmelCase__ = tubelet_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
super().__init__(**_UpperCAmelCase )
| 486 |
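A usage sketch for the config class above, assuming a transformers release that ships ViViT; the overridden values are arbitrary.
from transformers import VivitConfig

config = VivitConfig(num_frames=16, image_size=112)  # override two defaults
print(config.num_frames, config.image_size, config.hidden_size)  # 16 112 768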
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["image_processor", "tokenizer"]
UpperCAmelCase_ = "AutoImageProcessor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = self.image_processor
SCREAMING_SNAKE_CASE__ : Any = False
def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = args[0]
SCREAMING_SNAKE_CASE__ : str = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
return inputs
def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
if added_vocab is None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab()
SCREAMING_SNAKE_CASE__ : str = {}
while tokens:
SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE )
if start_token is None:
break
SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 )
SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE )
SCREAMING_SNAKE_CASE__ : Any = start_token.group()
if end_token is None:
SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group()
SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE )
if content is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if value:
if len(_UpperCAmelCase ) == 1:
SCREAMING_SNAKE_CASE__ : str = value[0]
SCREAMING_SNAKE_CASE__ : List[str] = value
else: # leaf nodes
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for leaf in content.split(r"<sep/>" ):
SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
SCREAMING_SNAKE_CASE__ : str = leaf[1:-2] # for categorical special tokens
output[key].append(_UpperCAmelCase )
if len(output[key] ) == 1:
SCREAMING_SNAKE_CASE__ : str = output[key][0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if len(_UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
return self.image_processor_class
@property
def A_ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
return self.image_processor
| 663 | 0 |
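The token-to-JSON walk above decodes Donut-style tag sequences into dicts; below is a flat, non-recursive sketch of the same idea (the real method additionally handles nested keys and <sep/> lists):
import re

def flat_token2json(tokens: str) -> dict:
    # One level of <s_key>value</s_key> pairs; illustrative only.
    return {key: value.strip() for key, value in re.findall(r"<s_(.*?)>(.*?)</s_\1>", tokens)}

print(flat_token2json("<s_menu>latte</s_menu><s_price>4.50</s_price>"))
# {'menu': 'latte', 'price': '4.50'}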
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
_lowerCAmelCase : Optional[int] = 1_0_0_0_0
_lowerCAmelCase : int = None
_lowerCAmelCase : int = None
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
_lowerCAmelCase : Optional[int] = ParquetConfig
def A( self):
return datasets.DatasetInfo(features=self.config.features)
def A( self , lowercase__):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__UpperCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_UpperCAmelCase , (str, list, tuple)):
__UpperCAmelCase : List[Any] = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__UpperCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : Dict = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
__UpperCAmelCase : int = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__UpperCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : Optional[int] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_UpperCAmelCase):
with open(_UpperCAmelCase , '''rb''') as f:
__UpperCAmelCase : List[str] = datasets.Features.from_arrow_schema(pq.read_schema(_UpperCAmelCase))
break
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'''files''': files}))
return splits
def A( self , lowercase__):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : Dict = table_cast(_UpperCAmelCase , self.info.features.arrow_schema)
return pa_table
def A( self , lowercase__):
__UpperCAmelCase : Any = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
F"Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'")
for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase)):
with open(_UpperCAmelCase , '''rb''') as f:
__UpperCAmelCase : Optional[Any] = pq.ParquetFile(_UpperCAmelCase)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
__UpperCAmelCase : List[str] = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(_UpperCAmelCase)
except ValueError as e:
logger.error(F"Failed to read file \'{file}\' with error {type(_UpperCAmelCase)}: {e}")
raise
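# A minimal standalone sketch of the batched Parquet read performed above; the
# file name is a made-up assumption for illustration, and only real pyarrow
# calls (pq.ParquetFile, ParquetFile.iter_batches) are used.
import pyarrow as pa
import pyarrow.parquet as pq

with open("train-00000-of-00001.parquet", "rb") as f:
    parquet_file = pq.ParquetFile(f)
    for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
        pa_table = pa.Table.from_batches([record_batch])  # one Arrow table per batch
        print(batch_idx, pa_table.num_rows)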
| 462 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCamelCase : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
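# A minimal sketch of the lazy-import pattern that _LazyModule implements,
# written independently of transformers; every name here is an illustrative
# assumption, not the library's actual implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # e.g. {"tokenization_bartpho": ["BartphoTokenizer"]}

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first accessed.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")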
| 663 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def update_area_of_max_square(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
A_ : Tuple = update_area_of_max_square(SCREAMING_SNAKE_CASE__ , col + 1 )
A_ : Tuple = update_area_of_max_square(row + 1 , col + 1 )
A_ : List[Any] = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
A_ : Optional[Any] = 1 + min([right, diagonal, down] )
A_ : Optional[int] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
return sub_problem_sol
else:
return 0
A_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
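# The plain recursion above re-solves overlapping subproblems, so it runs in
# exponential time; the variants below trade that cost for memoization and
# then for bottom-up tabulation.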
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
A_ : List[str] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__ )
A_ : Any = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE__ )
A_ : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
A_ : Any = 1 + min([right, diagonal, down] )
A_ : Any = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
A_ : Tuple = sub_problem_sol
return sub_problem_sol
else:
return 0
A_ : str = [0]
A_ : Union[str, Any] = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE__ )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE__ )
return largest_square_area[0]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
A_ : Optional[int] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
A_ : Tuple = dp_array[row][col + 1]
A_ : str = dp_array[row + 1][col + 1]
A_ : int = dp_array[row + 1][col]
if mat[row][col] == 1:
A_ : Union[str, Any] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A_ : Any = max(dp_array[row][col] , SCREAMING_SNAKE_CASE__ )
else:
A_ : List[Any] = 0
return largest_square_area
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = [0] * (cols + 1)
A_ : Union[str, Any] = [0] * (cols + 1)
A_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
A_ : Union[str, Any] = current_row[col + 1]
A_ : Union[str, Any] = next_row[col + 1]
A_ : List[Any] = next_row[col]
if mat[row][col] == 1:
A_ : Tuple = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A_ : List[str] = max(current_row[col] , SCREAMING_SNAKE_CASE__ )
else:
A_ : Any = 0
A_ : int = current_row
return largest_square_area
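# This last variant keeps only the current and next rows of the DP table,
# cutting the O(rows * cols) memory of the tabulated version above down to
# O(cols) at the same O(rows * cols) running time. Note that under the
# obfuscation all four functions share the name _SCREAMING_SNAKE_CASE, so the
# demo call below assumes the original (un-obfuscated) function name.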
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 590 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : List[str] = num_heads
SCREAMING_SNAKE_CASE__ : str = window_size
SCREAMING_SNAKE_CASE__ : Any = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = patch_norm
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride
SCREAMING_SNAKE_CASE__ : List[Any] = out_features
SCREAMING_SNAKE_CASE__ : Dict = out_indices
def A_ ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"]
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def A_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerSwinModelTester(self )
SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = 0
return t
def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
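# The four check_equivalence calls above cover (plain inputs, inputs with
# labels), each with and without output_hidden_states=True. NaNs are zeroed
# beforehand because NaN != NaN, so torch.allclose would otherwise reject
# positions that actually agree.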
@require_torch
class lowerCamelCase (unittest.TestCase , __lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCAmelCase_ = MaskFormerSwinConfig
def A_ ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self )
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertEqual(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 663 | 0 |
def _lowercase ( __lowerCamelCase : int = 50000000 ) -> int:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = set()
UpperCamelCase__ : int = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ : str = set(range(3 ,prime_square_limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,prime_square_limit + 1 ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,SCREAMING_SNAKE_CASE__ ) ) )
for primea in primes:
UpperCamelCase__ : str = primea * primea
for primea in primes:
UpperCamelCase__ : int = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ : List[str] = primea * primea * primea * primea
UpperCamelCase__ : List[Any] = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE__ )
return len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(F'{solution() = }')
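# Hedged sanity check (using the un-obfuscated function name as an assumption):
# below 50 there are exactly four prime power triples (28, 33, 47 and 49),
# so solution(50) should return 4.
# print(solution(50))  # -> 4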
| 344 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
# TODO: upload to AWS
_lowerCamelCase : str = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "retribert"
def __init__( self : Optional[Any], _UpperCAmelCase : Dict=3_0_5_2_2, _UpperCAmelCase : List[str]=7_6_8, _UpperCAmelCase : Tuple=8, _UpperCAmelCase : Optional[Any]=1_2, _UpperCAmelCase : Union[str, Any]=3_0_7_2, _UpperCAmelCase : Dict="gelu", _UpperCAmelCase : Tuple=0.1, _UpperCAmelCase : str=0.1, _UpperCAmelCase : List[str]=5_1_2, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : Dict=0.02, _UpperCAmelCase : Any=1E-12, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Any=1_2_8, _UpperCAmelCase : int=0, **_UpperCAmelCase : List[str], ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = share_encoders
SCREAMING_SNAKE_CASE__ : int = projection_dim
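# Hedged usage sketch: in the original transformers API this class is
# RetriBertConfig (the name and import path are assumptions here, and the
# model family is deprecated upstream):
# from transformers import RetriBertConfig
# cfg = RetriBertConfig()
# print(cfg.projection_dim)  # 128, per the defaults above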
| 663 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
a : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
a : int = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class __UpperCamelCase ( __lowerCamelCase ):
lowerCamelCase : Any =VOCAB_FILES_NAMES
lowerCamelCase : int =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] =["""input_ids""", """attention_mask"""]
lowerCamelCase : List[Any] =[]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
a : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
a : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
a : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
a : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
a : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
a : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
a : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
a : Optional[Any] = vocab_file
a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def __a ( self ) -> Any:
return self.sp_model.get_piece_size()
def __a ( self ) -> str:
a : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
a : Optional[int] = self.__dict__.copy()
a : Union[str, Any] = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
a : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a : str = {}
a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self , lowerCAmelCase__ ) -> List[str]:
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
return self.sp_model.piece_to_id(_UpperCAmelCase )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : int = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def __a ( self , lowerCAmelCase__ ) -> int:
a : List[str] = []
a : List[str] = ""
a : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
a : Union[str, Any] = True
a : Optional[Any] = []
else:
current_sub_tokens.append(_UpperCAmelCase )
a : Tuple = False
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> str:
a : Optional[Any] = kwargs.pop("use_source_tokenizer" , _UpperCAmelCase )
a : int = self.convert_ids_to_tokens(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a : List[str] = []
a : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
a : Tuple = []
sub_texts.append(_UpperCAmelCase )
else:
current_sub_text.append(_UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
a : str = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(_UpperCAmelCase ) )
else:
a : int = "".join(_UpperCAmelCase )
a : Union[str, Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a : Optional[int] = self.clean_up_tokenization(_UpperCAmelCase )
return clean_text
else:
return text
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Any = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , "wb" ) as fi:
a : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : Any = [self.cls_token_id]
a : Any = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Tuple = [self.sep_token_id]
a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
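# A minimal sketch of the sentencepiece round trip the tokenizer above wraps;
# "spiece.model" is a placeholder path, not a file shipped with this snippet.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Hello world", out_type=str)    # mirrors _tokenize above
ids = [sp.piece_to_id(piece) for piece in pieces]  # mirrors _convert_token_to_id
text = sp.decode(pieces)                           # mirrors convert_tokens_to_string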
| 633 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCamelCase : int = False
@skip_mps
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase_ = False
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A_ ( cls : str ) -> Union[str, Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : Tuple ) -> str:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
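# The class-level hooks above toggle torch.use_deterministic_algorithms; the
# obfuscation hides the flag values, but in the original test determinism is
# enabled in setUpClass and disabled again in tearDownClass.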
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
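# Note on the dummy inputs above: with a CLIP-style tokenizer, position 0 of
# "a cat and a frog" is the BOS token, so token_indices [2, 5] point at the
# nouns "cat" and "frog", the tokens Attend-and-Excite is asked to amplify.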
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = "cpu"
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 6_4, 6_4, 3) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase, 1E-3 )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A_ ( self : Any ) -> str:
"""simple docstring"""
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def A_ ( cls : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 )
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses"
SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7]
SCREAMING_SNAKE_CASE__ : str = pipe(
prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 663 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __lowerCamelCase ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Any ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
A_ : Optional[int] = size if size is not None else {"shortest_edge": 224}
A_ : Optional[Any] = get_size_dict(_UpperCAmelCase ,default_to_square=_UpperCAmelCase )
A_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
A_ : Optional[Any] = get_size_dict(_UpperCAmelCase ,default_to_square=_UpperCAmelCase ,param_name="""crop_size""" )
A_ : Tuple = do_resize
A_ : Tuple = size
A_ : str = resample
A_ : Optional[Any] = do_center_crop
A_ : List[str] = crop_size
A_ : Union[str, Any] = do_rescale
A_ : List[str] = rescale_factor
A_ : Optional[Any] = do_normalize
A_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : int = do_convert_rgb
def _a ( self : int ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : str ,):
'''simple docstring'''
A_ : Any = get_size_dict(_UpperCAmelCase ,default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Union[str, Any] = get_resize_output_image_size(_UpperCAmelCase ,size=size["""shortest_edge"""] ,default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase ,size=_UpperCAmelCase ,resample=_UpperCAmelCase ,data_format=_UpperCAmelCase ,**_UpperCAmelCase )
def _a ( self : Any ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Dict ,):
'''simple docstring'''
A_ : Dict = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_UpperCAmelCase ,size=(size["""height"""], size["""width"""]) ,data_format=_UpperCAmelCase ,**_UpperCAmelCase )
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return rescale(_UpperCAmelCase ,scale=_UpperCAmelCase ,data_format=_UpperCAmelCase ,**_UpperCAmelCase )
def _a ( self : Union[str, Any] ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Union[str, Any] ,):
'''simple docstring'''
return normalize(_UpperCAmelCase ,mean=_UpperCAmelCase ,std=_UpperCAmelCase ,data_format=_UpperCAmelCase ,**_UpperCAmelCase )
def _a ( self : List[str] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : Dict ,):
'''simple docstring'''
A_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Union[str, Any] = get_size_dict(_UpperCAmelCase ,param_name="""size""" ,default_to_square=_UpperCAmelCase )
A_ : Optional[Any] = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : str = crop_size if crop_size is not None else self.crop_size
A_ : List[str] = get_size_dict(_UpperCAmelCase ,param_name="""crop_size""" ,default_to_square=_UpperCAmelCase )
A_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Dict = image_mean if image_mean is not None else self.image_mean
A_ : Dict = image_std if image_std is not None else self.image_std
A_ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Tuple = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
A_ : List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
A_ : Optional[Any] = [self.resize(image=_UpperCAmelCase ,size=_UpperCAmelCase ,resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
A_ : List[str] = [self.center_crop(image=_UpperCAmelCase ,size=_UpperCAmelCase ) for image in images]
if do_rescale:
A_ : int = [self.rescale(image=_UpperCAmelCase ,scale=_UpperCAmelCase ) for image in images]
if do_normalize:
A_ : Optional[Any] = [self.normalize(image=_UpperCAmelCase ,mean=_UpperCAmelCase ,std=_UpperCAmelCase ) for image in images]
A_ : Tuple = [to_channel_dimension_format(_UpperCAmelCase ,_UpperCAmelCase ) for image in images]
A_ : str = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase ,tensor_type=_UpperCAmelCase )
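# A standalone numpy sketch of the rescale + normalize steps applied above.
# The mean/std values are the usual OPENAI_CLIP_MEAN / OPENAI_CLIP_STD
# constants, and the random image is a made-up stand-in.
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
x = image.astype(np.float32) * (1 / 255)                  # rescale
mean = np.array([0.48145466, 0.4578275, 0.40821073])      # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])      # OPENAI_CLIP_STD
x = (x - mean) / std                                      # normalize per channel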
| 665 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = PegasusConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : Dict = pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : int = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
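# The test above verifies KV-cache correctness: decoding the three new tokens
# with past_key_values must reproduce the matching slice of a full forward
# pass over the concatenated sequence, to within the 1e-3 tolerance.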
def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
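# Illustration of the default-mask rule above (the values are made up): with
# pad_token_id = 0, input_ids [[5, 9, 0, 0]] yields attention_mask
# [[1, 1, 0, 0]], and the decoder mask always keeps position 0, the
# decoder start token.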
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ = "google/pegasus-xsum"
@cached_property
def A_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase )
assert self.expected_text == generated_words
def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def A_ ( self : List[Any] ) -> Any:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
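# A minimal generation sketch mirroring the integration test above. This is a
# sketch, not part of the test suite: it assumes TF is installed and uses the
# public transformers names (AutoTokenizer, TFAutoModelForSeq2SeqLM) together
# with the google/pegasus-xsum checkpoint referenced above.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tokenizer(
    ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
    padding=True,
    return_tensors="tf",
)
generated_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True))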
| 663 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __a ( __lowerCamelCase ):
__snake_case : Union[str, Any] = 42
__snake_case : int = jnp.floataa
__snake_case : Optional[int] = True
def A ( self : Optional[int] ):
super().setup()
lowerCAmelCase_ : int = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Dict , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
lowerCAmelCase_ : str = super().__call__(*_UpperCAmelCase , **_UpperCAmelCase )
lowerCAmelCase_ : List[Any] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __a ( __lowerCamelCase ):
__snake_case : int = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    '''simple docstring'''
    def cross_entropy( logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("""f4""" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __a :
__snake_case : Dict = """google/bigbird-roberta-base"""
__snake_case : Dict = 3000
__snake_case : List[str] = 1_0500
__snake_case : List[str] = 128
__snake_case : Optional[Any] = 3
__snake_case : Dict = 1
__snake_case : Tuple = 5
# tx_args
__snake_case : Any = 3e-5
__snake_case : Optional[int] = 0.0
__snake_case : int = 2_0000
__snake_case : Any = 0.0095
__snake_case : int = """bigbird-roberta-natural-questions"""
__snake_case : Dict = """training-expt"""
__snake_case : str = """data/nq-training.jsonl"""
__snake_case : Any = """data/nq-validation.jsonl"""
def A ( self : Tuple ):
os.makedirs(self.base_dir , exist_ok=_UpperCAmelCase )
lowerCAmelCase_ : List[str] = os.path.join(self.base_dir , self.save_dir )
lowerCAmelCase_ : List[Any] = self.batch_size_per_device * jax.device_count()
@dataclass
class __a :
__snake_case : Optional[Any] = 42
__snake_case : Dict = 4096 # no dynamic padding on TPUs
def __call__( self : List[str] , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase_ : Any = self.collate_fn(_UpperCAmelCase )
lowerCAmelCase_ : Any = jax.tree_util.tree_map(_UpperCAmelCase , _UpperCAmelCase )
return batch
def A ( self : Tuple , UpperCAmelCase : int ):
lowerCAmelCase_ : List[Any] = self.fetch_inputs(features["""input_ids"""] )
lowerCAmelCase_ : List[Any] = {
"input_ids": jnp.array(_UpperCAmelCase , dtype=jnp.intaa ),
"attention_mask": jnp.array(_UpperCAmelCase , dtype=jnp.intaa ),
"start_labels": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def A ( self : List[Any] , UpperCAmelCase : list ):
lowerCAmelCase_ : Tuple = [self._fetch_inputs(_UpperCAmelCase ) for ids in input_ids]
return zip(*_UpperCAmelCase )
def A ( self : int , UpperCAmelCase : list ):
lowerCAmelCase_ : int = [1 for _ in range(len(_UpperCAmelCase ) )]
while len(_UpperCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
'''simple docstring'''
if seed is not None:
lowerCAmelCase_ : List[Any] = dataset.shuffle(seed=SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) // batch_size ):
lowerCAmelCase_ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE__ )
@partial(jax.pmap , axis_name="""batch""" )
def train_step( state , drp_rng , **model_inputs ):
'''simple docstring'''
def loss_fn(lowercase__ : int ):
lowerCAmelCase_ : str = model_inputs.pop("""start_labels""" )
lowerCAmelCase_ : int = model_inputs.pop("""end_labels""" )
lowerCAmelCase_ : int = model_inputs.pop("""pooled_labels""" )
lowerCAmelCase_ : Dict = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , dropout_rng=SCREAMING_SNAKE_CASE__ , train=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = outputs
return state.loss_fn(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase_ : Dict = jax.random.split(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = jax.value_and_grad(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = grad_fn(state.params )
lowerCAmelCase_ : Optional[Any] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
lowerCAmelCase_ : Optional[Any] = jax.lax.pmean(SCREAMING_SNAKE_CASE__ , """batch""" )
lowerCAmelCase_ : Dict = state.apply_gradients(grads=SCREAMING_SNAKE_CASE__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def val_step( state , **model_inputs ):
'''simple docstring'''
lowerCAmelCase_ : str = model_inputs.pop("""start_labels""" )
lowerCAmelCase_ : Tuple = model_inputs.pop("""end_labels""" )
lowerCAmelCase_ : Optional[int] = model_inputs.pop("""pooled_labels""" )
lowerCAmelCase_ : Union[str, Any] = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=state.params , train=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = outputs
lowerCAmelCase_ : int = state.loss_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[int] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class __a ( train_state.TrainState ):
__snake_case : int = struct.field(pytree_node=__lowerCamelCase )
@dataclass
class __a :
__snake_case : str = 42
__snake_case : Dict = 42
__snake_case : int = 42
__snake_case : Any = 42
__snake_case : Optional[int] = 42
__snake_case : List[str] = 42
__snake_case : Optional[Any] = None
def A ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None ):
lowerCAmelCase_ : Optional[Any] = model.params
lowerCAmelCase_ : int = TrainState.create(
apply_fn=model.__call__ , params=_UpperCAmelCase , tx=_UpperCAmelCase , loss_fn=_UpperCAmelCase , )
if ckpt_dir is not None:
lowerCAmelCase_ : List[Any] = restore_checkpoint(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ : List[Any] = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
lowerCAmelCase_ : Union[str, Any] = build_tx(**_UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = train_state.TrainState(
step=_UpperCAmelCase , apply_fn=model.__call__ , params=_UpperCAmelCase , tx=_UpperCAmelCase , opt_state=_UpperCAmelCase , )
lowerCAmelCase_ : int = args
lowerCAmelCase_ : str = data_collator
lowerCAmelCase_ : List[Any] = lr
lowerCAmelCase_ : List[Any] = params
lowerCAmelCase_ : Any = jax_utils.replicate(_UpperCAmelCase )
return state
def A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ):
lowerCAmelCase_ : Optional[int] = self.args
lowerCAmelCase_ : Union[str, Any] = len(_UpperCAmelCase ) // args.batch_size
lowerCAmelCase_ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : str = jax.random.split(_UpperCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCAmelCase_ : List[Any] = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase_ : List[str] = get_batched_dataset(_UpperCAmelCase , args.batch_size , seed=_UpperCAmelCase )
lowerCAmelCase_ : Dict = 0
for batch in tqdm(_UpperCAmelCase , total=_UpperCAmelCase , desc=F'Running EPOCH-{epoch}' ):
lowerCAmelCase_ : List[str] = self.data_collator(_UpperCAmelCase )
lowerCAmelCase_ : List[Any] = self.train_step_fn(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
lowerCAmelCase_ : Optional[int] = jax_utils.unreplicate(state.step )
lowerCAmelCase_ : str = running_loss.item() / i
lowerCAmelCase_ : Dict = self.scheduler_fn(state_step - 1 )
lowerCAmelCase_ : str = self.evaluate(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ : List[str] = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(_UpperCAmelCase ) )
self.logger.log(_UpperCAmelCase , commit=_UpperCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=_UpperCAmelCase )
def A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any ):
lowerCAmelCase_ : Optional[int] = get_batched_dataset(_UpperCAmelCase , self.args.batch_size )
lowerCAmelCase_ : List[str] = len(_UpperCAmelCase ) // self.args.batch_size
lowerCAmelCase_ : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase_ : Dict = 0
for batch in tqdm(_UpperCAmelCase , total=_UpperCAmelCase , desc="""Evaluating ... """ ):
lowerCAmelCase_ : Optional[int] = self.data_collator(_UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = self.val_step_fn(_UpperCAmelCase , **_UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def A ( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ):
lowerCAmelCase_ : int = jax_utils.unreplicate(_UpperCAmelCase )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """ )
self.model_save_fn(_UpperCAmelCase , params=state.params )
with open(os.path.join(_UpperCAmelCase , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_UpperCAmelCase , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(_UpperCAmelCase , """data_collator.joblib""" ) )
with open(os.path.join(_UpperCAmelCase , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , _UpperCAmelCase )
print("""DONE""" )
def restore_checkpoint( save_dir , state ):
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """flax_model.msgpack""" ) , """rb""" ) as f:
lowerCAmelCase_ : Union[str, Any] = from_bytes(state.params , f.read() )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """opt_state.msgpack""" ) , """rb""" ) as f:
lowerCAmelCase_ : List[str] = from_bytes(state.opt_state , f.read() )
lowerCAmelCase_ : str = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , """args.joblib""" ) )
lowerCAmelCase_ : List[str] = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , """data_collator.joblib""" ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """training_state.json""" ) , """r""" ) as f:
lowerCAmelCase_ : List[Any] = json.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Union[str, Any] = training_state["step"]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
'''simple docstring'''
lowerCAmelCase_ : int = num_train_steps - warmup_steps
lowerCAmelCase_ : Optional[Any] = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=SCREAMING_SNAKE_CASE__ , transition_steps=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[int] = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=1E-7 , transition_steps=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Tuple = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
'''simple docstring'''
def weight_decay_mask(lowercase__ : Tuple ):
lowerCAmelCase_ : Union[str, Any] = traverse_util.flatten_dict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[Any] = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[Any] = scheduler_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Union[str, Any] = optax.adamw(learning_rate=SCREAMING_SNAKE_CASE__ , weight_decay=SCREAMING_SNAKE_CASE__ , mask=SCREAMING_SNAKE_CASE__ )
return tx, lr
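# A standalone sketch of the warmup-then-linear-decay schedule assembled by
# the two optax.linear_schedule calls above (the step counts and peak learning
# rate below are illustrative, not the training defaults).
import optax

warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=1_000)
decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=19_000)
lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[1_000])
print(lr_schedule(0), lr_schedule(1_000), lr_schedule(20_000))  # 0.0 -> 3e-5 -> ~1e-7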
| 600 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
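# A minimal sketch of the lazy-import idea behind the _LazyModule call above
# (an illustration, not the transformers implementation): attribute access
# triggers the real import only on first use.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

# e.g. LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]}).sqrt(2.0)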
| 663 | 0 |
def solution( limit : int = 1_0_0_0_0_0_0 ):
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
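# Quick sanity check of the phi sieve above: the number of reduced proper
# fractions with denominator <= 8 is sum(phi(d) for d in 2..8), i.e. 21.
assert solution(8) == 21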
| 121 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch ( roberta_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
'''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias
SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCamelCase : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
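# A standalone sketch of the parity check performed inside the conversion
# above: compare two outputs by their max absolute difference and a
# tolerance (the tensors here are dummies, not real model outputs).
import torch

our_output = torch.randn(1, 11, 768)
their_output = our_output + 1e-5 * torch.randn_like(our_output)
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # ~1e-5
success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?", "🔥" if success else "💩")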
| 663 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count( message : str ) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x : tuple ) -> str:
    return x[0]
def get_frequency_order( message : str ) -> str:
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message : str ) -> int:
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
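# Usage sketch: ordinary English prose should score high on the 0-12 scale
# returned by english_freq_match_score, while letter salad scores low
# (the exact values depend on the input text).
print(english_freq_match_score("The quick brown fox jumps over the lazy dog."))
print(english_freq_match_score("zzzz qqqq xxxx jjjj"))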
| 127 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "Wav2Vec2FeatureExtractor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
@classmethod
def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
try:
return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: ", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase )
def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" )
else:
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"]
return inputs
def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Dict = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if labels is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"]
return input_features
def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : int = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
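# A minimal usage sketch; it assumes the public transformers class
# Wav2Vec2Processor (the standard counterpart of the processor defined
# above) and that the facebook/wav2vec2-base-960h checkpoint is reachable.
# The same __call__ routes audio to the feature extractor and text to the
# tokenizer, as implemented above.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of 16 kHz audio
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="np")
labels = processor(text="HELLO WORLD").input_ids  # tokenized transcript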
| 663 | 0 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9 # units = N * m^2 * C^-2
def couloumbs_law( force : float, chargea : float, chargeb : float, distance : float ):
    charge_product = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if distance < 0:
        raise ValueError("Distance cannot be negative" )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
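# Usage sketch: pass exactly one argument as 0 to solve for it. Two 1 C
# charges 1 m apart exert COULOMBS_CONSTANT newtons on each other.
print(couloumbs_law(force=0, chargea=1, chargeb=1, distance=1))
# {'force': 8988000000.0}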
| 131 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 663 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
__SCREAMING_SNAKE_CASE = '''▁'''
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = BarthezTokenizer
def __init__( self : Dict , A__ : List[Any]=None , A__ : Tuple=None , A__ : List[str]="<s>" , A__ : Optional[int]="</s>" , A__ : Optional[int]="</s>" , A__ : Union[str, Any]="<s>" , A__ : Optional[Any]="<unk>" , A__ : int="<pad>" , A__ : Dict="<mask>" , **A__ : List[str] , ) -> Any:
'''simple docstring'''
a__ : Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
a__ : List[Any] = vocab_file
a__ : Optional[int] = False if not self.vocab_file else True
def __lowerCAmelCase ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ : int = [self.cls_token_id]
a__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[str] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : int = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : List[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
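# A minimal illustration of the sequence-pair layout produced by
# build_inputs_with_special_tokens above, <s> A </s></s> B </s>
# (the token ids below are made up; 0 and 2 stand in for <s> and </s>).
cls, sep = [0], [2]
ids_a, ids_b = [10, 11], [20, 21, 22]
print(cls + ids_a + sep + sep + ids_b + sep)  # [0, 10, 11, 2, 2, 20, 21, 22, 2]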
| 688 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[str] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
_lowerCamelCase : Dict = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
_lowerCamelCase : Optional[Any] = {'''vinai/bartpho-syllable''': 1_0_2_4}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : Dict = cnt
cnt += 1
with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0]
SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids )
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int, _UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A_ ( self : List[str], _UpperCAmelCase : str ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
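# A minimal usage sketch; it assumes the public transformers class
# BartphoTokenizer (the standard counterpart of the tokenizer defined above)
# and that the vinai/bartpho-syllable checkpoint from the map above is
# reachable.
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]  # "We are researchers."
print(tokenizer.convert_ids_to_tokens(ids))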
| 663 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[float] , iterations : int ) -> list[float]:
    '''simple docstring'''
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = f"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table : NDArray[float64] ) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows ):
        total = 0
        for j in range(0, cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
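# Usage sketch on a strictly diagonally dominant system
#     4x + y = 2
#     x + 3y = -6
# whose exact solution is x = 12/11 ~ 1.09, y = -26/11 ~ -2.36; fifty Jacobi
# iterations land essentially on it.
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5], iterations=50))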
| 486 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster( vectors , noofclusters ):
    '''simple docstring'''
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
# Find out the dimensionality
SCREAMING_SNAKE_CASE__ : List[Any] = len(vectors[0] )
# Will help select random centroids from among the available vectors
SCREAMING_SNAKE_CASE__ : List[Any] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) )
shuffle(SCREAMING_SNAKE_CASE__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
SCREAMING_SNAKE_CASE__ : Tuple = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
SCREAMING_SNAKE_CASE__ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
SCREAMING_SNAKE_CASE__ : Any = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float64" , [dim] )
SCREAMING_SNAKE_CASE__ : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
SCREAMING_SNAKE_CASE__ : Tuple = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("int32" )
SCREAMING_SNAKE_CASE__ : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
SCREAMING_SNAKE_CASE__ : int = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
SCREAMING_SNAKE_CASE__ : str = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.placeholder("float" , [noofclusters] )
SCREAMING_SNAKE_CASE__ : Tuple = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
SCREAMING_SNAKE_CASE__ : Tuple = tf.initialize_all_variables()
# Initialize all variables
sess.run(SCREAMING_SNAKE_CASE__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
SCREAMING_SNAKE_CASE__ : Tuple = 1_00
for _ in range(SCREAMING_SNAKE_CASE__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Any = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
SCREAMING_SNAKE_CASE__ : Tuple = [
sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
SCREAMING_SNAKE_CASE__ : Any = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(SCREAMING_SNAKE_CASE__ ):
# Collect all the vectors assigned to this cluster
SCREAMING_SNAKE_CASE__ : Dict = [
vectors[i]
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
SCREAMING_SNAKE_CASE__ : str = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
SCREAMING_SNAKE_CASE__ : int = sess.run(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = sess.run(SCREAMING_SNAKE_CASE__ )
return centroids, assignments
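# The same expectation-maximization loop in plain NumPy, as a runnable sketch
# of what the TF1 graph above computes (toy data; assumes no cluster empties
# out during the iterations).
import numpy as np

rng = np.random.default_rng(0)
points = rng.normal(size=(100, 2))
k = 3
centroids = points[rng.choice(len(points), size=k, replace=False)]
for _ in range(100):
    # Expectation: assign every point to its nearest centroid.
    distances = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=-1)
    assignments = distances.argmin(axis=1)
    # Maximization: move each centroid to the mean of its cluster.
    centroids = np.array([points[assignments == j].mean(axis=0) for j in range(k)])
print(centroids)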
| 663 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase ( __lowerCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = BertTokenizer
_lowerCAmelCase : Union[str, Any] = BertTokenizerFast
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = filter_non_english
def A( self):
super().setUp()
__UpperCAmelCase : Any = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def A( self , lowercase__):
__UpperCAmelCase : Dict = "UNwant\u00E9d,running"
__UpperCAmelCase : List[str] = "unwanted, running"
return input_text, output_text
def A( self):
__UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file)
__UpperCAmelCase : Tuple = tokenizer.tokenize('''UNwant\u00E9d,running''')
self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , [9, 6, 7, 1_2, 1_0, 1_1])
def A( self):
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : List[str] = self.get_rust_tokenizer()
__UpperCAmelCase : Tuple = "UNwant\u00E9d,running"
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase)
__UpperCAmelCase : str = rust_tokenizer.tokenize(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__UpperCAmelCase : List[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : Dict = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(_UpperCAmelCase)
__UpperCAmelCase : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
# With lower casing
__UpperCAmelCase : str = self.get_tokenizer(do_lower_case=_UpperCAmelCase)
__UpperCAmelCase : List[Any] = self.get_rust_tokenizer(do_lower_case=_UpperCAmelCase)
__UpperCAmelCase : Dict = "UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase)
__UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__UpperCAmelCase : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : List[str] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[int] = tokenizer.encode(_UpperCAmelCase)
__UpperCAmelCase : Any = rust_tokenizer.encode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def A( self):
__UpperCAmelCase : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
def A( self):
__UpperCAmelCase : int = BasicTokenizer(do_lower_case=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo'''])
def A( self):
__UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : Any = BasicTokenizer(do_lower_case=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : str = BasicTokenizer(do_lower_case=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : Tuple = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def A( self):
__UpperCAmelCase : Union[str, Any] = BasicTokenizer()
__UpperCAmelCase : List[Any] = "a\n'll !!to?'d of, can't."
__UpperCAmelCase : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(_UpperCAmelCase) , _UpperCAmelCase)
def A( self):
__UpperCAmelCase : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__UpperCAmelCase : Optional[Any] = {}
for i, token in enumerate(_UpperCAmelCase):
__UpperCAmelCase : Any = i
__UpperCAmelCase : str = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing'''])
def A( self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def A( self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def A( self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def A( self):
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
self.assertListEqual(
[rust_tokenizer.tokenize(_UpperCAmelCase) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
@slow
def A( self):
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained('''bert-base-uncased''')
__UpperCAmelCase : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : Optional[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase)
__UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase)
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__UpperCAmelCase : List[str] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__UpperCAmelCase : Optional[int] = tokenizer_r.encode_plus(
_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , )
__UpperCAmelCase : Dict = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase , '''do_lower_case''') else False
__UpperCAmelCase : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''])
def A( self):
__UpperCAmelCase : Union[str, Any] = ["的", "人", "有"]
__UpperCAmelCase : Dict = "".join(_UpperCAmelCase)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__UpperCAmelCase : List[Any] = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : int = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase)
__UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__UpperCAmelCase : Union[str, Any] = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : List[Any] = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
__UpperCAmelCase : str = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase)
__UpperCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase)
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCAmelCase : List[Any] = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase)
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
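# NOTE: the WordpieceTokenizer assertions above follow from greedy
# longest-match-first segmentation. A minimal sketch of that algorithm,
# assuming a plain set-like vocab rather than the exact Hugging Face
# implementation:
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix matched: the whole word becomes [UNK]
        pieces.append(cur)
        start = end
    return pieces

# wordpiece("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]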
| 462 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
_lowerCamelCase : List[str] = None
_lowerCamelCase : Union[str, Any] = {
'''7B''': 1_1_0_0_8,
'''13B''': 1_3_8_2_4,
'''30B''': 1_7_9_2_0,
'''65B''': 2_2_0_1_6,
'''70B''': 2_8_6_7_2,
}
_lowerCamelCase : Optional[Any] = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : str=2_56 ) -> int:
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=True ) -> int:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , "tmp" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = read_json(os.path.join(SCREAMING_SNAKE_CASE__ , "params.json" ) )
SCREAMING_SNAKE_CASE__ : int = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = params["n_layers"]
SCREAMING_SNAKE_CASE__ : List[str] = params["n_heads"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_heads // num_shards
SCREAMING_SNAKE_CASE__ : str = params["dim"]
SCREAMING_SNAKE_CASE__ : List[str] = dim // n_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_0_0_0_0.0
SCREAMING_SNAKE_CASE__ : Tuple = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE__ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE__ : int = params["n_kv_heads"] # for GQA / MQA
SCREAMING_SNAKE_CASE__ : Optional[int] = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE__ : int = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE__ : Dict = n_heads
SCREAMING_SNAKE_CASE__ : str = n_heads_per_shard
SCREAMING_SNAKE_CASE__ : Dict = dim
# permute for sliced rotary
def permute(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=n_heads , SCREAMING_SNAKE_CASE__ : List[str]=dim , SCREAMING_SNAKE_CASE__ : Dict=dim ):
return w.view(SCREAMING_SNAKE_CASE__ , dima // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE__ : Dict = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
SCREAMING_SNAKE_CASE__ : List[Any] = [
torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , f'''consolidated.{i:02d}.pth''' ) , map_location="cpu" )
for i in range(SCREAMING_SNAKE_CASE__ )
]
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : List[str] = {"weight_map": {}}
for layer_i in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : List[Any] = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE__ : Any = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE__ : int = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Tuple = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : int = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : str = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : List[str] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[int] = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Write configs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"total_size": param_count * 2}
write_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin.index.json" ) )
SCREAMING_SNAKE_CASE__ : List[str] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
SCREAMING_SNAKE_CASE__ : Dict = params["multiple_of"] if "multiple_of" in params else 2_56
SCREAMING_SNAKE_CASE__ : Dict = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE__ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=SCREAMING_SNAKE_CASE__ , )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(SCREAMING_SNAKE_CASE__ , safe_serialization=SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_class(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
def _a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
parser.add_argument(
"--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
parser.add_argument(
"--output_dir" , help="Location to write HF model and tokenizer" , )
parser.add_argument("--safe_serialization" , type=SCREAMING_SNAKE_CASE__ , help="Whether or not to save using `safetensors`." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(args.input_dir , "tokenizer.model" )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
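# NOTE: the `permute` helper in this conversion script exists because the
# original LLaMA checkpoints interleave the rotary dimensions of the q/k
# projections, while the Hugging Face model expects the two rotary halves to
# be contiguous. A self-contained illustration of the reshape; the
# 1-head / head_dim-4 shape is chosen only to make the effect visible.
import torch

def permute_for_hf(w, n_heads, dim1, dim2):
    # Split each head's rows into (pairs, 2), swap those axes, flatten back.
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

w = torch.arange(16.0).reshape(4, 4)                 # one head, head_dim = 4
print(permute_for_hf(w, n_heads=1, dim1=4, dim2=4))  # rows reordered 0, 2, 1, 3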
| 663 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = [False] * len(SCREAMING_SNAKE_CASE__ )
A_ : Optional[Any] = []
queue.append(SCREAMING_SNAKE_CASE__ )
A_ : Union[str, Any] = True
while queue:
A_ : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(SCREAMING_SNAKE_CASE__ )
A_ : Optional[Any] = True
A_ : Optional[int] = u
return visited[t]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = [-1] * (len(SCREAMING_SNAKE_CASE__ ))
A_ : int = 0
while bfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A_ : int = float('''Inf''' )
A_ : List[Any] = sink
while s != source:
# Find the minimum value in select path
A_ : str = min(SCREAMING_SNAKE_CASE__ , graph[parent[s]][s] )
A_ : List[str] = parent[s]
max_flow += path_flow
A_ : Tuple = sink
while v != source:
A_ : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
A_ : Dict = parent[v]
return max_flow
UpperCamelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
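# NOTE: the max-flow routine above is the BFS-based Ford-Fulkerson method
# (Edmonds-Karp). Because the assignment targets use placeholder names, it
# does not run as written; the sketch below is a reconstruction with assumed
# variable names, not the original identifiers.
def bfs_augmenting_path(graph, source, sink, parent):
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]

def edmonds_karp(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs_augmenting_path(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path.
        path_flow = float("inf")
        v = sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        max_flow += path_flow
        # Update residual capacities along the path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow

# On the 6-node capacity matrix above, the maximum 0 -> 5 flow is 23.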
| 590 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = GPTaTokenizer
UpperCAmelCase_ = GPTaTokenizerFast
UpperCAmelCase_ = True
UpperCAmelCase_ = {"add_prefix_space": True}
UpperCAmelCase_ = False
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer"
return input_text, output_text
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : Tuple = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )
def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        # It's very difficult to mix/test pretokenization with byte-level encoding
        # and get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
# Simple input
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : int = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$"
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this."
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please."
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
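# NOTE: a quick usage sketch of the `add_prefix_space` behaviour the tests
# above exercise (the class is spelled GPT2TokenizerFast upstream). Byte-level
# BPE folds a leading space into the first token, so "lower" and " lower"
# tokenize differently; running this needs the pretrained "gpt2" files.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
print(tok.tokenize("lower newer"))  # ['Ġlower', 'Ġnewer'] -- Ġ marks the space byte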
| 663 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = getLogger(__name__)
def _lowercase ( __lowerCamelCase : Any ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : int = 8 ,__lowerCamelCase : int = 1024 ,__lowerCamelCase : List[str]="val" ,__lowerCamelCase : Dict=None ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : Union[str, Any]="summarization" ,__lowerCamelCase : List[str]=None ,__lowerCamelCase : Union[str, Any]=1 ,__lowerCamelCase : Dict = None ,__lowerCamelCase : List[str]="" ,**__lowerCamelCase : Union[str, Any] ,) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = str(SCREAMING_SNAKE_CASE__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' ,rank=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : List[str] = Path(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Optional[int] = save_dir.joinpath(F'rank_{local_rank}_output.json' )
torch.cuda.set_device(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ).cuda()
if fpaa:
UpperCamelCase__ : Union[str, Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) # update config with task specific params
UpperCamelCase__ : str = generate_kwargs.pop('''num_beams''' ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCamelCase__ : Tuple = num_return_sequences
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCamelCase__ : List[str] = tokenizer.model_max_length
if prefix is None:
UpperCamelCase__ : List[str] = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ""
UpperCamelCase__ : Any = SeqaSeqDataset(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,max_target_length=1024 ,type_path=SCREAMING_SNAKE_CASE__ ,n_obs=SCREAMING_SNAKE_CASE__ ,prefix=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCamelCase__ : int = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE__ ,distributed=SCREAMING_SNAKE_CASE__ ,add_extra_examples=SCREAMING_SNAKE_CASE__ ,shuffle=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : int = DataLoader(SCREAMING_SNAKE_CASE__ ,sampler=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,collate_fn=ds.collate_fn )
UpperCamelCase__ : Any = []
for batch in tqdm(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase__ : int = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) ,attention_mask=batch['''attention_mask'''].to(model.device ) ,num_return_sequences=SCREAMING_SNAKE_CASE__ ,num_beams=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
UpperCamelCase__ : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Optional[Any] = batch["ids"]
if num_return_sequences > 1:
UpperCamelCase__ : Union[str, Any] = chunks(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(SCREAMING_SNAKE_CASE__ ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return results, sampler.num_replicas
def _lowercase ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[str] = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' ,type=SCREAMING_SNAKE_CASE__ ,help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' ,type=SCREAMING_SNAKE_CASE__ ,help='''like facebook/bart-large-cnn,t5-base, etc.''' ,default='''sshleifer/distilbart-xsum-12-3''' ,)
parser.add_argument('''--save_dir''' ,type=SCREAMING_SNAKE_CASE__ ,help='''where to save''' ,default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
'''--type_path''' ,type=SCREAMING_SNAKE_CASE__ ,default='''test''' ,help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' ,type=SCREAMING_SNAKE_CASE__ ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=SCREAMING_SNAKE_CASE__ ,default=8 ,required=SCREAMING_SNAKE_CASE__ ,help='''batch size''' )
parser.add_argument(
'''--local_rank''' ,type=SCREAMING_SNAKE_CASE__ ,default=-1 ,required=SCREAMING_SNAKE_CASE__ ,help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,required=SCREAMING_SNAKE_CASE__ ,help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' ,type=SCREAMING_SNAKE_CASE__ ,default=1 ,required=SCREAMING_SNAKE_CASE__ ,help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' ,type=SCREAMING_SNAKE_CASE__ ,default=600 ,required=SCREAMING_SNAKE_CASE__ ,help='''How long should master process wait for other processes to finish.''' ,)
parser.add_argument('''--src_lang''' ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,required=SCREAMING_SNAKE_CASE__ )
parser.add_argument('''--tgt_lang''' ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,required=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
'''--prefix''' ,type=SCREAMING_SNAKE_CASE__ ,required=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--debug''' ,action='''store_true''' )
UpperCamelCase__ : Tuple = time.time()
UpperCamelCase__ : str = parser.parse_known_args()
UpperCamelCase__ : Dict = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE__ )
if generate_kwargs and args.local_rank <= 0:
print(F'parsed the following generate kwargs: {generate_kwargs}' )
UpperCamelCase__ : str = Path(args.save_dir + '''_tmp''' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) # this handles locking.
UpperCamelCase__ : List[Any] = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
    # In theory, a node could finish and save before another node hits this. If that happens, we can address it later.
UpperCamelCase__ : Dict = {}
if args.src_lang is not None:
UpperCamelCase__ : Optional[int] = args.src_lang
if args.tgt_lang is not None:
UpperCamelCase__ : List[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : List[Any] = eval_data_dir(
args.data_dir ,SCREAMING_SNAKE_CASE__ ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if args.local_rank <= 0:
UpperCamelCase__ : Union[str, Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Tuple = gather_results_from_each_node(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,args.sync_timeout )
UpperCamelCase__ : Optional[int] = combine_partial_results(SCREAMING_SNAKE_CASE__ )
if args.num_return_sequences > 1:
UpperCamelCase__ : Optional[int] = save_dir.joinpath('''pseudolabel_results.json''' )
print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return
UpperCamelCase__ : int = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(SCREAMING_SNAKE_CASE__ ) as f:
UpperCamelCase__ : Optional[Any] = [x.rstrip() for x in f.readlines()][: len(SCREAMING_SNAKE_CASE__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCamelCase__ : Any = "translation" in args.task
UpperCamelCase__ : Optional[int] = calculate_bleu if calc_bleu else calculate_rouge
UpperCamelCase__ : Union[str, Any] = "bleu" if calc_bleu else "rouge"
UpperCamelCase__ : Dict = score_fn(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : List[str] = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Dict = time.time() - start_time
UpperCamelCase__ : List[str] = round(runtime / metrics['''n_obs'''] ,4 )
UpperCamelCase__ : Union[str, Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCamelCase__ : Any = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
save_json(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
write_txt_file(SCREAMING_SNAKE_CASE__ ,save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(SCREAMING_SNAKE_CASE__ ,save_dir.joinpath(F'{args.type_path}.target' ) )
else:
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _lowercase ( __lowerCamelCase : Union[str, Any] ) -> List:
'''simple docstring'''
UpperCamelCase__ : Any = []
for partial_result in partial_results:
records.extend(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ : Any = sorted(SCREAMING_SNAKE_CASE__ ,key=lambda __lowerCamelCase : x["id"] )
UpperCamelCase__ : List[Any] = [x["pred"] for x in records]
return preds
def _lowercase ( __lowerCamelCase : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : Union[str, Any] ) -> List[Dict[str, List]]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = time.time()
logger.info('''waiting for all nodes to finish''' )
UpperCamelCase__ : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCamelCase__ : Dict = list(save_dir.glob('''rank_*.json''' ) )
if len(SCREAMING_SNAKE_CASE__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCamelCase__ : str = lmap(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
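# NOTE: when num_return_sequences > 1 the generation loop above regroups the
# flat list of decoded outputs per input with a `chunks` helper imported from
# utils. A minimal stand-in with the same contract (illustrative, not the
# original implementation):
def chunks(lst, n):
    """Yield successive n-sized slices of lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

# 2 inputs x 3 return sequences -> [['a', 'b', 'c'], ['d', 'e', 'f']]
print(list(chunks(["a", "b", "c", "d", "e", "f"], 3)))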
| 344 |
from functools import lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> set:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(SCREAMING_SNAKE_CASE__ )
if n > 1:
factors.add(SCREAMING_SNAKE_CASE__ )
return factors
@lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(SCREAMING_SNAKE_CASE__ ) )
def _a ( SCREAMING_SNAKE_CASE__ : list ) -> bool:
'''simple docstring'''
return len(set(SCREAMING_SNAKE_CASE__ ) ) in (0, 1)
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 2
while True:
# Increment each value of a generated range
SCREAMING_SNAKE_CASE__ : List[str] = [base + i for i in range(SCREAMING_SNAKE_CASE__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
SCREAMING_SNAKE_CASE__ : Tuple = [upf_len(SCREAMING_SNAKE_CASE__ ) for x in group]
checker.append(SCREAMING_SNAKE_CASE__ )
# If all numbers in the list are equal, return the group variable.
if equality(SCREAMING_SNAKE_CASE__ ):
return group
# Increment our base variable by 1
base += 1
def _a ( SCREAMING_SNAKE_CASE__ : int = 4 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = run(SCREAMING_SNAKE_CASE__ )
return results[0] if len(SCREAMING_SNAKE_CASE__ ) else None
if __name__ == "__main__":
print(solution())
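# NOTE: this row is Project Euler problem 47 (first run of n consecutive
# integers, each with n distinct prime factors). A self-contained sanity check
# against the examples in the problem statement; the known answer for n = 4 is
# 134043. The helper below is independent of the placeholder-named code above.
def distinct_prime_factors(n):
    count, p = 0, 2
    while p * p <= n:
        if n % p == 0:
            count += 1
            while n % p == 0:
                n //= p
        p += 1
    return count + (1 if n > 1 else 0)

assert [distinct_prime_factors(x) for x in (14, 15)] == [2, 2]
assert [distinct_prime_factors(x) for x in (644, 645, 646)] == [3, 3, 3]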
| 663 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( __lowerCamelCase ):
lowerCamelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
lowerCamelCase : Optional[Any] ="""AutoImageProcessor"""
lowerCamelCase : Any ="""AutoTokenizer"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
a : Any = self.image_processor
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Any:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
a : Tuple = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
a : int = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
a : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def __a ( self ) -> int:
return ["input_ids", "attention_mask", "pixel_values"]
| 633 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : int = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> Dict:
"""simple docstring"""
pass
@slow
@require_torch
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
pass
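# NOTE: minimal usage sketch of the pipeline under test. Candidate labels are
# free-form strings scored against the clip; CLAP expects 48 kHz mono audio,
# and the silent array here is only a stand-in so the call shape is visible.
import numpy as np
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of "silence"
print(classifier(audio, candidate_labels=["Sound of a dog", "Silence"]))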
| 663 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
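# NOTE: the module above defers heavy imports through transformers'
# _LazyModule. The same effect can be sketched with PEP 562 module-level
# __getattr__ in any package __init__.py (illustrative pattern, not the
# Hugging Face class):
import importlib

_LAZY = {"MegatronBertModel": ".modeling_megatron_bert"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")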
| 665 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase : List[str] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase : Any = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase : str = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = len([g for position, g in enumerate(SCREAMING_SNAKE_CASE__ ) if g == main_target[position]] )
return (item, float(SCREAMING_SNAKE_CASE__ ))
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
SCREAMING_SNAKE_CASE__ : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE__ : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = list(SCREAMING_SNAKE_CASE__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE__ : Tuple = random.choice(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : tuple[str, float] , SCREAMING_SNAKE_CASE__ : list[tuple[str, float]] , SCREAMING_SNAKE_CASE__ : list[str] , ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE__ : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE__ : Tuple = 10 if child_n >= 10 else child_n
for _ in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = population_score[random.randint(0 , SCREAMING_SNAKE_CASE__ )][0]
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = crossover(parent_a[0] , SCREAMING_SNAKE_CASE__ )
# Append new string to the population list.
pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
return pop
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] , SCREAMING_SNAKE_CASE__ : bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE__ : str = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE__ : Dict = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Generate random starting population.
SCREAMING_SNAKE_CASE__ : List[Any] = []
for _ in range(SCREAMING_SNAKE_CASE__ ):
population.append("".join([random.choice(SCREAMING_SNAKE_CASE__ ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE__ : int = [evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE__ : List[str] = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE__ : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(SCREAMING_SNAKE_CASE__ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
(item, score / len(SCREAMING_SNAKE_CASE__ )) for item, score in population_score
]
# This is selection
for i in range(SCREAMING_SNAKE_CASE__ ):
population.extend(select(population_score[int(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(SCREAMING_SNAKE_CASE__ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase : Dict = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
_lowerCamelCase : Tuple = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
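The commented-out concurrency hint inside the loop can be made concrete. A minimal sketch follows, assuming the deobfuscated `evaluate(item, target)` from this snippet; `NUM_WORKERS` is a hypothetical tuning knob, not a name from the original. As the original comment warns, with a scoring function this cheap the thread overhead usually loses to the plain list comprehension.

```python
import concurrent.futures


def evaluate_concurrently(population: list[str], target: str) -> list[tuple[str, float]]:
    # NUM_WORKERS is hypothetical; size it to the available cores.
    NUM_WORKERS = 4
    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        # executor.map preserves input order, unlike collecting a set of futures.
        return list(executor.map(lambda item: evaluate(item, target), population))
```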
| 663 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
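One detail worth pulling out of `get_dummy_inputs` above: on the `mps` backend the test seeds the global RNG instead of building a device-bound `torch.Generator`, since the torch versions this suite targets do not support `mps`-local generators. A standalone sketch of that device-aware seeding; the helper name is ours, not from the test file:

```python
import torch


def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    # mps lacks device-local Generator support in the targeted torch versions,
    # so fall back to seeding the global RNG there (torch.manual_seed returns
    # the default Generator).
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


gen = seeded_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))
```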
| 600 |
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # One first-order step: follow the tangent at (x, y[k]).
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
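A quick sanity check of the solver, using the deobfuscated name `explicit_euler` from above: integrate y' = y from x = 0 with y(0) = 1, whose exact value at x = 1 is e ≈ 2.71828.

```python
# 100 Euler steps of size 0.01; the first-order error makes the result undershoot e.
y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ≈ 2.7048, versus e ≈ 2.71828
```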
| 663 | 0 |
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of nums in which no two elements are adjacent."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend a subset that excluded the previous element,
        # or keep the best subset seen so far without the current element.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
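Two small checks of the recurrence, using the name as deobfuscated above:

```python
print(maximum_non_adjacent_sum([1, 2, 4, 5]))            # 7, picking 2 and 5
print(maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]))   # 18, picking 5, 7 and 6
print(maximum_non_adjacent_sum([]))                      # 0, the empty-list base case
```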
| 121 |
def solution(limit: int = 28123) -> int:
    """
    Project Euler 23: sum all positive integers that cannot be written as the
    sum of two abundant numbers (every integer above 28123 can be).
    """
    # Sieve the sum of proper divisors for every n up to the limit.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
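For context on the constants: 12 is the smallest abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 = 12 + 12 is the smallest sum of two abundant numbers, and the problem statement guarantees every integer above 28123 has such a representation, which is why the default limit stops there. As a cross-check against the published answer (quoted from the Project Euler archive, not derived here):

```python
assert solution() == 4179871  # published Project Euler 23 answer
```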
| 663 | 0 |
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [matrix | vector].
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit the minimal-degree polynomial through the points (1, y_1), (2, y_2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The degree-10 generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials OP(1, n)..OP(order, n)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
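A worked example straight from the problem statement: for the cubic u(n) = n³, the optimum polynomial through the first two terms is 7n − 6, and its first incorrect term (FIT) is OP(2, 3) = 15. Using the deobfuscated names above; the final answer is quoted from the published Project Euler solution, not derived here:

```python
cubic = lambda n: n**3
op_2 = interpolate([cubic(1), cubic(2)])  # fits 7n - 6 through (1, 1) and (2, 8)
print(op_2(3))       # 15, the FIT for this bad optimum polynomial
print(solution())    # 37076114526, the published answer for the degree-10 case
```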
| 127 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
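What this pattern buys: importing the package only registers the names recorded in `_import_structure`; the heavy torch/TF submodules load on first attribute access, when `_LazyModule` resolves them. A sketch of the consumer-side effect, with class names taken from the structure above:

```python
# No modeling code has been imported yet at this point; _LazyModule only
# knows that MobileViTConfig lives in .configuration_mobilevit.
from transformers import MobileViTConfig

# The first attribute access triggers the real submodule import.
config = MobileViTConfig()
print(config.model_type)  # "mobilevit"
```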
| 663 | 0 |
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Return the Schur complement of block A in the matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # Passing the 3x2 matrix in A's slot makes the column counts disagree.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
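The first test above relies on the block-determinant identity det([[A, B], [Bᵀ, C]]) = det(A) · det(S), where S is the Schur complement. A small hand-checkable example, reusing the function as deobfuscated here:

```python
import numpy as np

a = np.array([[4.0, 2.0], [2.0, 3.0]])
b = np.array([[1.0], [0.0]])
c = np.array([[5.0]])

s = schur_complement(a, b, c)   # 5 - b.T @ inv(a) @ b = 4.625
m = np.block([[a, b], [b.T, c]])
# det(m) = 37 and det(a) * s = 8 * 4.625 = 37, so the identity holds.
print(np.isclose(np.linalg.det(m), np.linalg.det(a) * s[0, 0]))  # True
```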
| 131 |
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 663 | 0 |
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import (
        FlaxRegNetForImageClassification,
        FlaxRegNetModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
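The JIT test above traces the model call once and compares output shapes against an eagerly executed run. The same enabled/disabled comparison works on any pure function; a minimal standalone sketch, assuming only jax is installed:

```python
import jax
import jax.numpy as jnp


@jax.jit
def affine(x, w, b):
    # Traced once per input shape/dtype, then executed as compiled XLA.
    return x @ w + b


x, w, b = jnp.ones((2, 3)), jnp.ones((3, 4)), jnp.zeros(4)
jitted = affine(x, w, b)
with jax.disable_jit():  # falls back to op-by-op eager execution
    eager = affine(x, w, b)
assert jitted.shape == eager.shape
```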
| 688 |
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
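The four full-loop tests repeat one scheduler protocol: scale the sample, predict, step. A condensed sketch of that loop outside the test harness, with a random tensor standing in for a real denoising model; it assumes diffusers and torchsde are installed, and the constructor arguments mirror get_scheduler_config above:

```python
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(scaled)  # stand-in for a real model(scaled, t)
    sample = scheduler.step(model_output, t, sample).prev_sample

print(sample.abs().mean())
```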
| 663 | 0 |