| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # both substrings must be tested explicitly; `"audio" and "qkv" in key` is always truthy on the left
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
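# Sanity-check sketch (added here, not part of the original script): the qkv split
# in rename_state_dict assumes the fused attention projection stacks query, key and
# value along dim 0, so a toy tensor with made-up shapes verifies the slicing:
#
#     mixed_qkv = torch.arange(48, dtype=torch.float32).reshape(12, 4)
#     qkv_dim = mixed_qkv.size(0) // 3  # -> 4
#     q = mixed_qkv[:qkv_dim]
#     k = mixed_qkv[qkv_dim : qkv_dim * 2]
#     v = mixed_qkv[qkv_dim * 2 :]
#     assert q.shape == k.shape == v.shape == (4, 4)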
| 80 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"} )
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
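# Hypothetical instantiation sketch (model name, paths and cache dir are
# placeholders, not from the original file):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#     train_dataset = SquadDataset(data_args, tokenizer, mode=Split.train, cache_dir="./cache")
#     batch = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors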
| 655 | 0 |
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message
    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")  # the original dropped this assignment, leaving spaces in place

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
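# Round-trip sketch (not in the original file): encode folds "j" into "i" and
# drops spaces, so decode(encode(x)) returns the squashed plaintext.
#
#     square = PolybiusCipher()
#     assert square.decode(square.encode("test message")) == "testmessage"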
| 81 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
__lowerCamelCase = ["input_ids"]
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
| 655 | 0 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 |
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n=100):
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
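# Worked check (a sketch): the continued fraction of e is [2; 1, 2, 1, 1, 4, ...],
# so the tenth convergent is 1457/536 and the digit sum of the numerator is
# 1 + 4 + 5 + 7 == 17, the canonical Project Euler 65 example.
#
#     assert solution(10) == 17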
| 655 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
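# Minimal usage sketch (the backbone name below is a placeholder):
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     assert config.use_timm_backbone is True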
| 83 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
| 655 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
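# Spot-check sketch: 2**11 - 1 == 2047 == 23 * 89 is composite, so 11 drops out
# while the Mersenne-prime exponents remain.
#
#     assert [p for p in (3, 5, 7, 11, 13) if lucas_lehmer_test(p)] == [3, 5, 7, 13]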
| 84 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
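# Convergence sketch (hypothetical seed; the upstream doctest for this script
# expects a result between 31 and 35 for these arguments, since the tiny 0.02
# learning rate needs hundreds of thousands of propagations to settle):
#
#     random.seed(0)
#     result = forward_propagation(32, 450_000)
#     assert 31 < result < 35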
| 655 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to the center of the padded image (the original assignment target
    # was lost; centering is the standard choice for odd-sized kernels)
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    image_padded[pad_y : pad_y + image.shape[0], pad_x : pad_x + image.shape[1]] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
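# Tiny dilation sketch (not in the original file): a single foreground pixel grows
# into a plus shape under the cross-shaped structuring element used above, so the
# output sums to 5.
#
#     img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#     kern = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#     assert dilation(img, kern).sum() == 5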
| 85 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 655 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__a :List[str] = concatenate_datasets
__a :Optional[Any] = DownloadConfig
__a :Any = DownloadManager
__a :Any = DownloadMode
__a :Any = DownloadConfig
__a :int = DownloadMode
__a :Union[str, Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 86 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
| 655 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : Union[str, Any] , ) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase__ , subfolder=UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ , A__ = cls.from_config(UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__)
if hasattr(UpperCAmelCase__ , '''create_state''') and getattr(UpperCAmelCase__ , '''has_state''' , UpperCAmelCase__):
A__ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[Any]) ->List[Any]:
'''simple docstring'''
self.save_config(save_directory=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int) ->Dict:
'''simple docstring'''
A__ = list(set([cls.__name__] + cls._compatibles))
A__ = importlib.import_module(__name__.split('''.''')[0])
A__ = [
getattr(UpperCAmelCase__ , UpperCAmelCase__) for c in compatible_classes_str if hasattr(UpperCAmelCase__ , UpperCAmelCase__)
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: tuple) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
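# Quick shape/range sketch for the cosine schedule above (assumes jax is installed):
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert bool((betas > 0).all()) and bool((betas <= 0.999).all())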
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
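# Toy usage sketch for the helpers above. The state below is a hypothetical
# stand-in: a real scheduler state carries an `alphas_cumprod` array just like it.
def _demo_add_noise():
    @flax.struct.dataclass
    class _State:
        alphas_cumprod: jnp.ndarray

    state = _State(alphas_cumprod=jnp.linspace(0.99, 0.01, 10))
    samples = jnp.ones((2, 3))
    noise = jnp.zeros((2, 3))
    timesteps = jnp.array([0, 9])
    noisy = add_noise_common(state, samples, noise, timesteps)
    assert noisy.shape == samples.shape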
| 87 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
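# Self-contained sketch (not in the original module) of the IDX layout the readers
# above expect: a big-endian header (magic, count, rows, cols) followed by raw
# uint8 pixels, gzip-compressed. The file-like object only needs a `.name`.
def _demo_extract_images():
    import io
    import struct

    class _NamedBytesIO(io.BytesIO):
        name = "fake-images.gz"  # plain BytesIO has no settable .name attribute

    raw = struct.pack(">IIII", 2051, 2, 2, 2) + bytes(range(8))
    buf = _NamedBytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb") as gz:
        gz.write(raw)
    buf.seek(0)
    data = _extract_images(buf)
    assert data.shape == (2, 2, 2, 1)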
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
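# Editor's note (illustrative, numbers invented): at an epoch boundary
# `next_batch` stitches two slices together -- with 10 examples and
# batch_size=4, the third call returns the last 2 examples of the current
# epoch concatenated with the first 2 of the freshly reshuffled one, and
# `epochs_completed` is incremented by 1.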
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
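# Editor's note: an illustrative driver for the deprecated loader above,
# assuming the final function corresponds to the historical `read_data_sets`
# helper from tensorflow.contrib.learn (paths are placeholders):
#
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#   batch_xs, batch_ys = datasets.train.next_batch(100)
#   # batch_xs: (100, 784) float32 scaled to [0, 1]; batch_ys: (100, 10) one-hot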
| 655 | 0 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase = NewType("""DataClass""", Any)
UpperCAmelCase = NewType("""DataClassType""", Any)
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
if isinstance(__snake_case , __snake_case ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _snake_case ( __snake_case : list ):
"""simple docstring"""
_lowerCamelCase : int = {str(__snake_case ): choice for choice in choices}
return lambda __snake_case : str_to_choice.get(__snake_case , __snake_case )
def _snake_case ( *,
__snake_case : Union[str, List[str]] = None , __snake_case : str = None , __snake_case : Any = dataclasses.MISSING , __snake_case : Callable[[], Any] = dataclasses.MISSING , __snake_case : dict = None , **__snake_case : List[str] , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_lowerCamelCase : str = {}
if aliases is not None:
_lowerCamelCase : Optional[Any] = aliases
if help is not None:
_lowerCamelCase : Any = help
return dataclasses.field(metadata=__snake_case , default=__snake_case , default_factory=__snake_case , **__snake_case )
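# Editor's note: illustrative only -- how the field factory above (shipped as
# `HfArg` in transformers.hf_argparser) is typically used inside an argument
# dataclass; the class and field names here are invented:
#
#   @dataclasses.dataclass
#   class ExampleArguments:
#       learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Peak learning rate.")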
class lowercase__ ( A_ ):
__UpperCAmelCase = 42
def __init__( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Optional[Any]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_lowerCamelCase : Dict = ArgumentDefaultsHelpFormatter
super().__init__(**SCREAMING_SNAKE_CASE)
if dataclasses.is_dataclass(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Union[str, Any] = [dataclass_types]
_lowerCamelCase : Tuple = list(SCREAMING_SNAKE_CASE)
for dtype in self.dataclass_types:
self._add_dataclass_arguments(SCREAMING_SNAKE_CASE)
@staticmethod
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Dict = F'--{field.name}'
_lowerCamelCase : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , SCREAMING_SNAKE_CASE):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""")
_lowerCamelCase : Any = kwargs.pop("""aliases""" , [])
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : Optional[Any] = [aliases]
_lowerCamelCase : Optional[int] = getattr(field.type , """__origin__""" , field.type)
if origin_type is Union or (hasattr(SCREAMING_SNAKE_CASE , """UnionType""") and isinstance(SCREAMING_SNAKE_CASE , types.UnionType)):
if str not in field.type.__args__ and (
len(field.type.__args__) != 2 or type(SCREAMING_SNAKE_CASE) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
F' Problem encountered in field \'{field.name}\'.')
if type(SCREAMING_SNAKE_CASE) not in field.type.__args__:
# filter `str` in Union
_lowerCamelCase : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_lowerCamelCase : Union[str, Any] = getattr(field.type , """__origin__""" , field.type)
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_lowerCamelCase : List[str] = (
field.type.__args__[0] if isinstance(SCREAMING_SNAKE_CASE , field.type.__args__[1]) else field.type.__args__[1]
)
_lowerCamelCase : Tuple = getattr(field.type , """__origin__""" , field.type)
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_lowerCamelCase : Dict = {}
if origin_type is Literal or (isinstance(field.type , SCREAMING_SNAKE_CASE) and issubclass(field.type , SCREAMING_SNAKE_CASE)):
if origin_type is Literal:
_lowerCamelCase : Union[str, Any] = field.type.__args__
else:
_lowerCamelCase : Optional[int] = [x.value for x in field.type]
_lowerCamelCase : int = make_choice_type_function(kwargs["""choices"""])
if field.default is not dataclasses.MISSING:
_lowerCamelCase : Optional[int] = field.default
else:
_lowerCamelCase : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_lowerCamelCase : Dict = copy(SCREAMING_SNAKE_CASE)
# Hack because type=bool in argparse does not behave as we want.
_lowerCamelCase : List[str] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_lowerCamelCase : List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_lowerCamelCase : List[Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
_lowerCamelCase : str = """?"""
# This is the value that will get picked if we do --field_name (without value)
_lowerCamelCase : Dict = True
elif isclass(SCREAMING_SNAKE_CASE) and issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : Any = field.type.__args__[0]
_lowerCamelCase : Optional[int] = """+"""
if field.default_factory is not dataclasses.MISSING:
_lowerCamelCase : int = field.default_factory()
elif field.default is dataclasses.MISSING:
_lowerCamelCase : Dict = True
else:
_lowerCamelCase : List[Any] = field.type
if field.default is not dataclasses.MISSING:
_lowerCamelCase : Any = field.default
elif field.default_factory is not dataclasses.MISSING:
_lowerCamelCase : List[Any] = field.default_factory()
else:
_lowerCamelCase : int = True
parser.add_argument(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_lowerCamelCase : Tuple = False
parser.add_argument(F'--no_{field.name}' , action="""store_false""" , dest=field.name , **SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any:
if hasattr(SCREAMING_SNAKE_CASE , """_argument_group_name"""):
_lowerCamelCase : List[Any] = self.add_argument_group(dtype._argument_group_name)
else:
_lowerCamelCase : str = self
try:
_lowerCamelCase : Dict[str, type] = get_type_hints(SCREAMING_SNAKE_CASE)
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""")
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Optional[int] = """.""".join(map(SCREAMING_SNAKE_CASE , sys.version_info[:3]))
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""") from ex
raise
for field in dataclasses.fields(SCREAMING_SNAKE_CASE):
if not field.init:
continue
_lowerCamelCase : Union[str, Any] = type_hints[field.name]
self._parse_dataclass_field(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
_lowerCamelCase : str = []
if args_filename:
args_files.append(Path(SCREAMING_SNAKE_CASE))
elif look_for_args_file and len(sys.argv):
args_files.append(Path(sys.argv[0]).with_suffix(""".args"""))
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_lowerCamelCase : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , action="""append""")
# Use only remaining args for further parsing (remove the args_file_flag)
_lowerCamelCase , _lowerCamelCase : int = args_file_parser.parse_known_args(args=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = vars(SCREAMING_SNAKE_CASE).get(args_file_flag.lstrip("""-""") , SCREAMING_SNAKE_CASE)
if cmd_args_file_paths:
args_files.extend([Path(SCREAMING_SNAKE_CASE) for p in cmd_args_file_paths])
_lowerCamelCase : List[str] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_lowerCamelCase : Optional[int] = file_args + args if args is not None else file_args + sys.argv[1:]
_lowerCamelCase , _lowerCamelCase : Tuple = self.parse_known_args(args=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = []
for dtype in self.dataclass_types:
_lowerCamelCase : str = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init}
_lowerCamelCase : str = {k: v for k, v in vars(SCREAMING_SNAKE_CASE).items() if k in keys}
for k in keys:
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = dtype(**SCREAMING_SNAKE_CASE)
outputs.append(SCREAMING_SNAKE_CASE)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(SCREAMING_SNAKE_CASE)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
return (*outputs,)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
_lowerCamelCase : List[str] = set(args.keys())
_lowerCamelCase : List[Any] = []
for dtype in self.dataclass_types:
_lowerCamelCase : Dict = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init}
_lowerCamelCase : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys())
_lowerCamelCase : Any = dtype(**SCREAMING_SNAKE_CASE)
outputs.append(SCREAMING_SNAKE_CASE)
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(SCREAMING_SNAKE_CASE)}')
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
with open(Path(SCREAMING_SNAKE_CASE) , encoding="""utf-8""") as open_json_file:
_lowerCamelCase : Optional[int] = json.loads(open_json_file.read())
_lowerCamelCase : Optional[Any] = self.parse_dict(SCREAMING_SNAKE_CASE , allow_extra_keys=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
_lowerCamelCase : Optional[int] = self.parse_dict(yaml.safe_load(Path(SCREAMING_SNAKE_CASE).read_text()) , allow_extra_keys=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
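# Editor's note: an illustrative end-to-end use of the parser class above,
# assuming it is transformers' `HfArgumentParser` as the structure suggests;
# the dataclass is invented:
#
#   @dataclasses.dataclass
#   class TrainArgs:
#       output_dir: str = "out"
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(["--output_dir", "runs", "--fp16"])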
| 88 |
from __future__ import annotations
class lowerCAmelCase :
def __init__( self :Union[str, Any] , _lowercase :List[Any]=None ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __repr__( self :Dict ):
'''simple docstring'''
lowercase__ = []
lowercase__ = self
while temp:
string_rep.append(f'''{temp.data}''' )
lowercase__ = temp.next
return "->".join(_lowercase )
def _A ( __magic_name__ ):
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ = lowercase__ = Node(elements_list[0] )
for i in range(1 , len(__magic_name__ ) ):
lowercase__ = Node(elements_list[i] )
lowercase__ = current.next
return head
def _A ( __magic_name__ ):
if head_node is not None and isinstance(__magic_name__ , __magic_name__ ):
print_reverse(head_node.next )
print(head_node.data )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(__magic_name__ )
print("Elements in Reverse:" )
print_reverse(__magic_name__ )
if __name__ == "__main__":
main()
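# Editor's note: an iterative variant of `print_reverse`, added as a sketch.
# It avoids the recursion above (which can hit Python's recursion limit on
# long lists) by buffering the data on an explicit stack; `head_node` is the
# same Node type defined at the top of this snippet.
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())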
| 655 | 0 |
SCREAMING_SNAKE_CASE : Optional[Any] = tuple[float, float, float]
SCREAMING_SNAKE_CASE : int = tuple[float, float, float]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Vectorad:
_lowercase : int = end_pointa[0] - end_pointa[0]
_lowercase : List[Any] = end_pointa[1] - end_pointa[1]
_lowercase : Optional[int] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Vectorad:
_lowercase : Optional[int] = ab[1] * ac[2] - ab[2] * ac[1] # *i
_lowercase : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
_lowercase : Tuple = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> bool:
return tuple(round(lowerCamelCase_ , lowerCamelCase_ ) for x in vector ) == (0, 0, 0)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 10 ) -> bool:
_lowercase : int = create_vector(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Any = create_vector(lowerCamelCase_ , lowerCamelCase_ )
return is_zero_vector(get_ad_vectors_cross(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
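# Editor's note: a self-contained sketch of what the functions above compute
# (the call sites name them create_vector / get_3d_vectors_cross /
# is_zero_vector; the last function checks whether three 3-D points are
# collinear, since AB x AC is the zero vector iff A, B and C lie on one line).
# Sample points below are invented.
def _collinear_sketch(a, b, c, ndigits=10):
    ab = tuple(q - p for p, q in zip(a, b))
    ac = tuple(q - p for p, q in zip(a, c))
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        ab[2] * ac[0] - ab[0] * ac[2],
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return all(round(x, ndigits) == 0 for x in cross)

assert _collinear_sketch((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))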
| 89 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d //= 2  # integer division keeps the exponent exact for arbitrarily large n
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.abspath(A )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowerCAmelCase__ = tf.train.list_variables(A )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowerCAmelCase__ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowerCAmelCase__ = name[1:]
# figure out how many levels deep the name is
lowerCAmelCase__ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(A )
# read data
lowerCAmelCase__ = tf.train.load_variable(A , A )
names.append('''/'''.join(A ) )
arrays.append(A )
logger.info(F"""Read a total of {len(A ):,} layers""" )
# Sanity check
if len(set(A ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(A ) )})""" )
lowerCAmelCase__ = list(set(A ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(A , A ):
lowerCAmelCase__ = full_name.split('''/''' )
lowerCAmelCase__ = model
lowerCAmelCase__ = []
for i, m_name in enumerate(A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
lowerCAmelCase__ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
lowerCAmelCase__ = getattr(A , '''encoder''' )
lowerCAmelCase__ = getattr(A , '''layer''' )
lowerCAmelCase__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''pooler''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
lowerCAmelCase__ = getattr(A , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
lowerCAmelCase__ = getattr(A , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
lowerCAmelCase__ = getattr(A , '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
lowerCAmelCase__ = getattr(A , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
lowerCAmelCase__ = getattr(A , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
lowerCAmelCase__ = getattr(A , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''intermediate''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
lowerCAmelCase__ = getattr(A , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
lowerCAmelCase__ = getattr(A , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowerCAmelCase__ = '''.'''.join(A )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , A ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , A ):
lowerCAmelCase__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowerCAmelCase__ = array.transpose()
if pointer.shape == array.shape:
lowerCAmelCase__ = torch.from_numpy(A )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def _snake_case ( A , A , A ) -> Any:
# Instantiate model
logger.info(F"""Loading model based on config from {config_path}...""" )
lowerCAmelCase__ = BertConfig.from_json_file(A )
lowerCAmelCase__ = BertModel(A )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(A , A , A )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
__UpperCAmelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 90 |
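# Editor's note: an illustrative invocation of the conversion script above;
# all paths are placeholders:
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file   /path/to/bert_config.json \
#       --pytorch_dump_path  /path/to/pytorch_model.bin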
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,
mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,
up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,
in_channels=3 ,
out_channels=6 ,
cross_attention_dim=32 ,
encoder_hid_dim=32 ,
attention_head_dim=8 ,
addition_embed_type="text" ,
addition_embed_type_num_heads=2 ,
cross_attention_norm="group_norm" ,
resnet_time_scale_shift="scale_shift" ,
act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,
mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,
up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,
in_channels=6 ,
out_channels=6 ,
cross_attention_dim=32 ,
encoder_hid_dim=32 ,
attention_head_dim=8 ,
addition_embed_type="text" ,
addition_embed_type_num_heads=2 ,
cross_attention_norm="group_norm" ,
resnet_time_scale_shift="scale_shift" ,
act_fn="gelu" ,
class_embed_type="timestep" ,
mid_block_scale_factor=1.414 ,
time_embedding_act_fn="gelu" ,
time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
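# Editor's note: illustrative only -- a concrete test case would mix the
# helper class above into unittest (class and attribute names are
# assumptions based on the DeepFloyd IF test-suite):
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#       def get_dummy_inputs(self, device, seed=0): ...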
| 655 | 0 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 91 |
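# Editor's note: a tiny illustrative consumer of the re-exports above,
# assuming this file is accelerate/utils/__init__.py as the relative imports
# suggest:
#
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)
#   batch = send_to_device(batch, "cuda:0")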
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowercase__ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowercase__ = model(_lowercase )["last_hidden_state"]
lowercase__ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
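# Editor's note (illustrative): being decorated with @slow, the integration
# test above only runs when slow tests are enabled, e.g. under the
# transformers test conventions (the test path is an assumption):
#
#   RUN_SLOW=1 pytest tests/models/camembert -k "camembert"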
| 655 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> List[Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> Tuple:
if args.student_type == "roberta":
lowercase : List[Any] =False
elif args.student_type == "gpt2":
lowercase : Dict =False
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : int ) -> str:
if args.student_type == "roberta":
lowercase : str =False
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : int =argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__magic_name__ , required=__magic_name__ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__magic_name__ , required=__magic_name__ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__magic_name__ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__magic_name__ , required=__magic_name__ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__magic_name__ , type=__magic_name__ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__magic_name__ , required=__magic_name__ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__magic_name__ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__magic_name__ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=__magic_name__ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__magic_name__ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__magic_name__ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__magic_name__ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__magic_name__ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__magic_name__ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__magic_name__ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__magic_name__ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__magic_name__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__magic_name__ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__magic_name__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__magic_name__ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__magic_name__ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__magic_name__ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__magic_name__ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__magic_name__ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__magic_name__ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__magic_name__ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__magic_name__ , default=4000 , help='''Checkpoint interval.''' )
lowercase : Dict =parser.parse_args()
sanity_checks(__magic_name__ )
# ARGS #
init_gpu_params(__magic_name__ )
set_seed(__magic_name__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__magic_name__ ) , __magic_name__ , indent=4 )
git_log(args.dump_path )
lowercase , lowercase , lowercase : Optional[int] =MODEL_CLASSES[args.student_type]
lowercase , lowercase , lowercase : str =MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowercase : Optional[Any] =teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowercase : Tuple ={}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowercase : List[str] =tokenizer.all_special_tokens.index(__magic_name__ )
lowercase : int =tokenizer.all_special_ids[idx]
logger.info(f'''Special tokens {special_tok_ids}''' )
lowercase : Optional[int] =special_tok_ids
lowercase : str =tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
lowercase : List[str] =pickle.load(__magic_name__ )
if args.mlm:
logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
lowercase : Optional[Any] =pickle.load(__magic_name__ )
lowercase : Optional[int] =np.maximum(__magic_name__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowercase : Optional[Any] =0.0 # do not predict special tokens
lowercase : Any =torch.from_numpy(__magic_name__ )
else:
lowercase : List[str] =None
lowercase : int =LmSeqsDataset(params=__magic_name__ , data=__magic_name__ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
lowercase : Optional[int] =student_config_class.from_pretrained(args.student_config )
lowercase : List[Any] =True
if args.student_pretrained_weights is not None:
logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
lowercase : Optional[int] =student_model_class.from_pretrained(args.student_pretrained_weights , config=__magic_name__ )
else:
lowercase : int =student_model_class(__magic_name__ )
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
lowercase : List[Any] =teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__magic_name__ )
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__magic_name__ , __magic_name__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__magic_name__ , __magic_name__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowercase : Optional[Any] =Distiller(
params=__magic_name__ , dataset=__magic_name__ , token_probs=__magic_name__ , student=__magic_name__ , teacher=__magic_name__ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
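# Editor's note: an illustrative launch command for the distillation script
# above; every path and hyper-parameter below is a placeholder (the real
# recipes live in the research_projects/distillation README):
#
#   python train.py --force --dump_path out/ \
#       --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#       --student_type distilbert --student_config training_configs/distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0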
| 92 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
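# Editor's note: an invented round-trip check for the two functions above
# (their defs are obfuscated to `_A`; in the original TheAlgorithms module
# they are `base64_encode` / `base64_decode`):
#
#   data = b"Python"
#   encoded = base64_encode(data)        # b'UHl0aG9u'
#   assert base64_decode(encoded) == data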
| 655 | 0 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__A = """scheduler_config.json"""
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :str = 1
__magic_name__ :Optional[Any] = 2
__magic_name__ :Optional[Any] = 3
__magic_name__ :List[Any] = 4
__magic_name__ :int = 5
@dataclass
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :jnp.ndarray
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :Tuple = SCHEDULER_CONFIG_NAME
__magic_name__ :Dict = ["""dtype"""]
__magic_name__ :str = []
__magic_name__ :Tuple = True
@classmethod
def snake_case ( cls , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = cls.load_config(
pretrained_model_name_or_path=__UpperCAmelCase , subfolder=__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = cls.from_config(__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase , **__UpperCAmelCase )
if hasattr(__UpperCAmelCase , 'create_state' ) and getattr(__UpperCAmelCase , 'has_state' , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = False , **__UpperCAmelCase ):
'''simple docstring'''
self.save_config(save_directory=__UpperCAmelCase , push_to_hub=__UpperCAmelCase , **__UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def snake_case ( cls ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase__ :Any = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase__ :Union[str, Any] = [
getattr(__UpperCAmelCase , __UpperCAmelCase ) for c in compatible_classes_str if hasattr(__UpperCAmelCase , __UpperCAmelCase )
]
return compatible_classes
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->jnp.ndarray:
"""simple docstring"""
assert len(_SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_SCREAMING_SNAKE_CASE ) - x.ndim) ) , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9_9_9 , _SCREAMING_SNAKE_CASE=jnp.floataa ) ->jnp.ndarray:
"""simple docstring"""
def alpha_bar(_SCREAMING_SNAKE_CASE ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
lowerCAmelCase__ :Tuple = []
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[int] = i / num_diffusion_timesteps
lowerCAmelCase__ :List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_SCREAMING_SNAKE_CASE ) / alpha_bar(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return jnp.array(_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :jnp.ndarray
__magic_name__ :jnp.ndarray
__magic_name__ :jnp.ndarray
@classmethod
def snake_case ( cls , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase__ :Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase__ :Tuple = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ :Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ :str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
lowerCAmelCase__ :Optional[Any] = 1.0 - betas
lowerCAmelCase__ :Union[str, Any] = jnp.cumprod(__UpperCAmelCase , axis=0 )
return cls(
alphas=__UpperCAmelCase , betas=__UpperCAmelCase , alphas_cumprod=__UpperCAmelCase , )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = state.alphas_cumprod
lowerCAmelCase__ :Union[str, Any] = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase__ :Dict = sqrt_alpha_prod.flatten()
lowerCAmelCase__ :Any = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape )
lowerCAmelCase__ :Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase__ :Optional[int] = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase__ :Optional[int] = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ :str = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
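# --- usage sketch (illustrative, not part of the module above) ---------------
# Demonstrates what `add_noise_common` computes for a toy batch: the per-timestep
# scalars sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) are broadcast from the left
# onto the samples, exactly as `broadcast_to_shape_from_left` does. All names
# here (`x0`, `t`, ...) are invented for the demo.
import jax
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 1000)               # the "linear" schedule above
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
x0 = jnp.ones((2, 3, 8, 8))                          # toy clean samples
noise = jax.random.normal(jax.random.PRNGKey(0), x0.shape)
t = jnp.array([10, 500])                             # one timestep per batch item
sqrt_a = (alphas_cumprod[t] ** 0.5).reshape(-1, 1, 1, 1)
sqrt_1ma = ((1.0 - alphas_cumprod[t]) ** 0.5).reshape(-1, 1, 1, 1)
noisy = sqrt_a * x0 + sqrt_1ma * noise               # == add_noise_common(state, x0, noise, t)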
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
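# --- usage sketch (illustrative) ---------------------------------------------
# Typical end-to-end use of the pipeline above. The checkpoint id follows the
# public DiT release but is an assumption here; running this downloads the
# weights and needs a GPU for the fp16 variant.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
generator = torch.manual_seed(33)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images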
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt` in a file, returning it with the surrounding lines."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting a given task, formatted as markdown links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check the model list in a task guide's auto-generated tip against the library, updating it if needed."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.'
            )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """Wrapper class for the arguments used by `bitsandbytes` 8-bit and 4-bit quantization."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments are of the correct types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Returns True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Returns the quantization method used, or None if the model is not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
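# --- usage sketch (illustrative) ---------------------------------------------
# How this config is typically consumed by `from_pretrained`. The model id is an
# assumption for the demo, and actually loading it needs a CUDA device with
# bitsandbytes>=0.39.0 installed.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="bfloat16",  # strings are resolved via getattr(torch, ...)
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)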
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
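# --- usage sketch (illustrative) ---------------------------------------------
# Building the composite config by hand; all values shown are the defaults
# defined above, so this mirrors `GitConfig()` with no arguments.
vision_config = GitVisionConfig(image_size=224, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
assert config.vision_config.image_size == 224
assert config.to_dict()["model_type"] == "git"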
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
@property
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
__magic_name__: Optional[int] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: int = self.dummy_uncond_unet
__magic_name__: int = PNDMScheduler()
__magic_name__: Dict = PNDMPipeline(unet=__snake_case , scheduler=__snake_case )
pndm.to(__snake_case )
pndm.set_progress_bar_config(disable=__snake_case )
__magic_name__: Optional[Any] = torch.manual_seed(0 )
__magic_name__: List[str] = pndm(generator=__snake_case , num_inference_steps=2_0 , output_type="""numpy""" ).images
__magic_name__: Tuple = torch.manual_seed(0 )
__magic_name__: List[Any] = pndm(generator=__snake_case , num_inference_steps=2_0 , output_type="""numpy""" , return_dict=__snake_case )[0]
__magic_name__: Optional[Any] = image[0, -3:, -3:, -1]
__magic_name__: Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: str = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__magic_name__: Any = """google/ddpm-cifar10-32"""
__magic_name__: List[str] = UNetaDModel.from_pretrained(__snake_case )
__magic_name__: Tuple = PNDMScheduler()
__magic_name__: Any = PNDMPipeline(unet=__snake_case , scheduler=__snake_case )
pndm.to(__snake_case )
pndm.set_progress_bar_config(disable=__snake_case )
__magic_name__: Optional[int] = torch.manual_seed(0 )
__magic_name__: List[str] = pndm(generator=__snake_case , output_type="""numpy""" ).images
__magic_name__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: Union[str, Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
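# --- usage sketch (illustrative) ---------------------------------------------
# The same pipeline outside the test harness; the checkpoint id matches the
# integration test above, and downloading it requires network access.
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
image = pipe(num_inference_steps=50, output_type="pil").images[0]
image.save("pndm_sample.png")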
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'

# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
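# A minimal invocation sketch (my own, not part of the original script; the
# file name is illustrative, but the flags match the argparse setup above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection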
| 655 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
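# Hedged usage sketch: thanks to the _LazyModule above, the tokenizer module is
# only imported on first attribute access, e.g.
#
#   from transformers.models.byt5 import ByT5Tokenizer  # triggers the lazy load
#   tok = ByT5Tokenizer()            # ByT5 is byte-level, so no vocab file is needed
#   ids = tok("hello").input_ids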
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
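# Worked example (my own illustration): with height=768, width=768 and
# scale_factor=8, 768 // 8**2 == 12 exactly, so the function returns
# (12 * 8, 12 * 8) == (96, 96) -- the latent resolution the UNet operates on
# before the MoVQ decoder upsamples by the scale factor.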
class KandinskyV22Pipeline(DiffusionPipeline):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
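# Note on the guidance arithmetic inside __call__ above: classifier-free guidance
# computes noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond),
# i.e. it extrapolates from the unconditional prediction toward the conditional one;
# guidance_scale == 1.0 recovers the purely conditional prediction.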
| 655 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=4 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = rotary_dim
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = vocab_size - 1
SCREAMING_SNAKE_CASE__ = vocab_size - 1
SCREAMING_SNAKE_CASE__ = vocab_size - 1
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = model_class_name(A_ )
SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , A_ )
SCREAMING_SNAKE_CASE__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, -1:] , attention_mask=A_ , past_key_values=outputs_cache.past_key_values , position_ids=A_ , )
SCREAMING_SNAKE_CASE__ = model(A_ )
SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = model_class_name(A_ )
SCREAMING_SNAKE_CASE__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , A_ )
SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=A_ , position_ids=A_ , )
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FlaxGPTJModelTester(self )
def lowercase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(A_ , A_ , A_ , A_ )
def lowercase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
A_ , A_ , A_ , A_ )
@tooslow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE__ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=A_ , truncation=A_ )
SCREAMING_SNAKE_CASE__ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = model.config.eos_token_id
SCREAMING_SNAKE_CASE__ = jax.jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(A_ , A_ )
@is_pt_flax_cross_test
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(A_ , A_ )
SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(A_ , A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = pt_model_class(A_ ).eval()
SCREAMING_SNAKE_CASE__ = model_class(A_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ )
SCREAMING_SNAKE_CASE__ = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model(**A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(A_ , from_pt=A_ )
SCREAMING_SNAKE_CASE__ = fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(A_ , A_ )
SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(A_ , A_ )
SCREAMING_SNAKE_CASE__ = pt_model_class(A_ ).eval()
SCREAMING_SNAKE_CASE__ = model_class(A_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(A_ , fx_model.params )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model(**A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = pt_model_class.from_pretrained(A_ , from_flax=A_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowercase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
SCREAMING_SNAKE_CASE__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
| 100 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 655 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload, sampling_rate):
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
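# Hedged usage sketch for ffmpeg_read (the file name is illustrative; ffmpeg
# must be on PATH):
#
#   with open("sample.mp3", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   # `audio` is a mono float32 numpy array resampled to 16 kHz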
def a__ ( A__, A__, A__ = "f32le", ):
SCREAMING_SNAKE_CASE_ : int = F'''{sampling_rate}'''
SCREAMING_SNAKE_CASE_ : Any = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE_ : Tuple = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
SCREAMING_SNAKE_CASE_ : List[str] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE_ : Any = 'alsa'
SCREAMING_SNAKE_CASE_ : Tuple = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE_ : List[str] = 'avfoundation'
SCREAMING_SNAKE_CASE_ : Dict = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE_ : List[str] = 'dshow'
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'default'
SCREAMING_SNAKE_CASE_ : List[Any] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE_ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ : List[str] = _ffmpeg_stream(A__, A__ )
for item in iterator:
yield item
def a__ ( A__, A__, A__ = None, A__ = None, A__ = "f32le", ):
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE_ : int = stream_chunk_s
else:
SCREAMING_SNAKE_CASE_ : Any = chunk_length_s
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ffmpeg_microphone(A__, A__, format_for_conversion=A__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE_ : List[Any] = np.intaa
SCREAMING_SNAKE_CASE_ : List[str] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE_ : str = np.floataa
SCREAMING_SNAKE_CASE_ : Optional[Any] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
SCREAMING_SNAKE_CASE_ : Any = chunk_length_s / 6
SCREAMING_SNAKE_CASE_ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__, (int, float) ):
SCREAMING_SNAKE_CASE_ : Dict = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE_ : Optional[int] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ : Any = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ : Any = datetime.datetime.now()
SCREAMING_SNAKE_CASE_ : Optional[Any] = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__, A__, stride=(stride_left, stride_right), stream=A__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.frombuffer(item['raw'], dtype=A__ )
SCREAMING_SNAKE_CASE_ : Tuple = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE_ : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
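# Worked trace of chunk_bytes_iter (my own illustration): with chunk_len=8,
# stride=(2, 2) and stream=False, feeding b"abcdefghijkl" in one piece yields
#   {"raw": b"abcdefgh", "stride": (0, 2)}   # first chunk has no left stride
#   {"raw": b"efghijkl", "stride": (2, 2)}   # the last 4 bytes were kept as overlap
#   {"raw": b"ijkl",     "stride": (2, 0)}   # final leftover, since len > stride_left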
def _ffmpeg_stream(ffmpeg_command, buflen):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
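# Hedged usage sketch for ffmpeg_microphone_live (values are illustrative):
#
#   for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#       audio = chunk["raw"]            # float32 samples
#       left, right = chunk["stride"]   # samples to discard at each edge
#       if not chunk.get("partial", False):
#           pass  # a full 5 s window (plus strides) is ready here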
| 101 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
        self.dtype = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
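# Hedged usage sketch (method names follow the upstream diffusers API that the
# defs above correspond to; shapes and the noise_pred source are illustrative):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       # noise_pred = unet.apply({"params": params}, sample, t).sample
#       sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)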
| 655 | 0 |
"""simple docstring"""
def join(separator, separated):
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("""join() accepts only strings to be joined""")
        joined += word_or_phrase + separator
    return joined.strip(separator)
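# Illustrative calls (my own examples):
#   join("-", ["a", "b", "c"])   returns 'a-b-c'  (the trailing separator is stripped)
#   join(" ", ["You", "are", "amazing!"])   returns 'You are amazing!'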
if __name__ == "__main__":
from doctest import testmod
testmod()
| 102 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'}
    )
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'}
    )
    lang_id: int = field(
        default=0,
        metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        },
    )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})
class Split(Enum):
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''',
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''')
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i):
        # Convert the cached feature to tensors and build the model inputs
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
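# Hedged usage sketch (the data_dir and checkpoint name are illustrative):
#
#   from transformers import AutoTokenizer
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   inputs = train_dataset[0]  # dict with input_ids / attention_mask / positions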
| 655 | 0 |
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
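# Because BFS expands nodes in order of increasing distance from the start, the
# first path that reaches the goal in an unweighted graph is guaranteed to use
# the fewest edges -- hence bfs_shortest_path(demo_graph, 'G', 'D') returns
# ['G', 'C', 'A', 'B', 'D'], and bfs_shortest_path_distance agrees: len - 1 == 4.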
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 103 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
RESOURCE_FILES_NAMES = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id_): id_ for id_ in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
    def UpperCAmelCase ( self :Any , token_ids_a :Any , token_ids_b :Tuple=None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def UpperCAmelCase ( self :Dict , offset_mapping_a :int , offset_mapping_b :Union[str, Any]=None ):
        '''simple docstring'''
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def UpperCAmelCase ( self :Optional[Any] , token_ids_a :Union[str, Any] , token_ids_b :Dict=None , already_has_special_tokens :Optional[Any]=False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def UpperCAmelCase ( self :int , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
    def UpperCAmelCase ( self :str , char :Optional[int] ):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def UpperCAmelCase ( self :Tuple , char :List[str] ):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def UpperCAmelCase ( self :int , char :Dict ):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def UpperCAmelCase ( self :List[str] , char :List[str] ):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("\n" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def UpperCAmelCase ( self :List[str] , save_directory :str , filename_prefix :Optional[str] = None ):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        sentencepiece_model_file = os.path.join(save_directory , "sentencepiece.bpe.model" )
        with open(sentencepiece_model_file , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
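# A minimal pair-layout sketch (not part of the original file): the special-token
# methods above produce [CLS] A [SEP] for one sequence and
# [CLS] A [SEP] [SEP] B [SEP] for a pair; the ids below are made up for illustration.
def _pair_layout_demo():
    cls_id, sep_id = 1, 2
    ids_a, ids_b = [11, 12], [21]
    input_ids = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a ) + 1) + [1] * (len(ids_b ) + 3)
    assert len(input_ids ) == len(token_type_ids )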
| 655 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024 ):
    """simple docstring"""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(text ):
        return tok(text, return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path ):
    """simple docstring"""
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path, save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path, save_path / F"""{split}.target""" )
def packer_cli():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len", type=int, default=128 )
    parser.add_argument("--data_dir", type=str )
    parser.add_argument("--save_path", type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer, Path(args.data_dir ), args.max_seq_len, args.save_path )
if __name__ == "__main__":
packer_cli()
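# Usage sketch (not in the original script): a stand-in whitespace "tokenizer"
# that mimics tok(text, return_tensors="pt").input_ids.shape, so the greedy
# packing above can be exercised without downloading a real tokenizer.
def _pack_examples_demo():
    from types import SimpleNamespace
    def whitespace_tok(text, return_tensors="pt" ):
        return SimpleNamespace(input_ids=SimpleNamespace(shape=(1, len(text.split() )) ) )
    src = ["a b", "c d", "e f g h"]
    tgt = ["1", "2", "3 4"]
    packed_src, packed_tgt = pack_examples(whitespace_tok, src, tgt, max_tokens=4 )
    # "a b" + "c d" still fit in 4 tokens; "e f g h" starts a new packed example
    assert packed_src == ["a b c d", "e f g h"], packed_src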
| 104 |
def sum_digits( num ):
    # Sum of the decimal digits of num.
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n = 100 ):
    # Project Euler 65: digit sum of the numerator of the max_n-th convergent of e.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
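# Cross-check (sketch, not in the original): Project Euler 65 states the 10th
# convergent of e is 1457/536, so solution(10) should equal 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17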
| 655 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Any = ["image_processor", "tokenizer"]
__a : Tuple = "ChineseCLIPImageProcessor"
__a : List[Any] = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' ,FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.batch_decode(*snake_case__ ,**snake_case__ )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.decode(*snake_case__ ,**snake_case__ )
@property
    def snake_case ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def snake_case ( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,FutureWarning ,)
        return self.image_processor_class
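# Name-merging sketch for the `model_input_names` property above (not part of
# the original file): order is preserved and duplicates dropped via dict.fromkeys.
_tok_names = ['input_ids', 'token_type_ids', 'attention_mask']
_img_names = ['pixel_values']
assert list(dict.fromkeys(_tok_names + _img_names ) ) == _tok_names + _img_names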
| 105 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
lowercase__ = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(_lowercase , _lowercase )}` does not exist
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
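# Shape-contract sketch for the `preset_shape` mapping above (not part of the
# original file): a valid voice preset carries a 1-D semantic prompt and 2-D
# coarse/fine prompts. The array sizes below are made up for illustration.
def _voice_preset_shape_demo():
    demo_preset = {
        'semantic_prompt': np.zeros(16 ),
        'coarse_prompt': np.zeros((2, 16) ),
        'fine_prompt': np.zeros((8, 16) ),
    }
    for key, expected_ndim in {'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2}.items():
        assert len(demo_preset[key].shape ) == expected_ndim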
| 655 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :int =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : int = XGLMTokenizer
A_ : Dict = XGLMTokenizerFast
A_ : Tuple = True
A_ : str = True
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
A = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
A = '<pad>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(__UpperCamelCase ) , 1_008 )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
A = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
A = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def __UpperCamelCase ( self : Any ) -> int:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__UpperCamelCase , f.name )
A = XGLMTokenizer(f.name , keep_accents=__UpperCamelCase )
A = pickle.dumps(__UpperCamelCase )
pickle.loads(__UpperCamelCase )
def __UpperCamelCase ( self : List[str] ) -> Dict:
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'I was born in 92000, and this is falsé.'
A = tokenizer.tokenize(__UpperCamelCase )
A = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
A = 'Hello World!'
A = [2, 31_227, 4_447, 35]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
A = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
A = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __UpperCamelCase ( self : List[Any] ) -> str:
# fmt: off
A = {
'input_ids': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='facebook/xglm-564M' , padding=__UpperCamelCase , ) | 106 |
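# Offset sketch for the XGLM tokenizer tests above (an assumption: the HF XGLM
# tokenizer uses fairseq_offset == 1 to make room for control tokens, as the
# `value + tokenizer.fairseq_offset` expectations suggest):
_sp_ids = [285, 46, 10, 170, 382]
_fairseq_offset = 1
assert [i + _fairseq_offset for i in _sp_ids] == [286, 47, 11, 171, 383]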
import math
import random
def sigmoid_function( value , deriv = False ):
    # If deriv is True, `value` is assumed to already be a sigmoid output.
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value (acts as both the fixed input and the learning-rate scale)
INITIAL_VALUE = 0.02
def forward_propagation( expected , number_propagations ):
    # Random starting weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_a_error = (expected / 100) - layer_a
        # Error delta
        layer_a_delta = layer_a_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_a_delta
    return layer_a * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input("""Expected value: """))
    number_propagations = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
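# Sanity checks for the sigmoid helper above (a sketch, not in the original
# script); wrapped in a function so the checks do not run on import.
def _check_sigmoid():
    assert sigmoid_function(0.0 ) == 0.5
    assert sigmoid_function(0.5 , deriv=True ) == 0.25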
| 655 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_UpperCAmelCase : List[str] = True
from torch.cuda.amp import autocast
_UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Whether to log verbose messages or not."} , )
__lowerCAmelCase = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
__lowerCAmelCase = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
__lowerCAmelCase = field(
default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} )
def _SCREAMING_SNAKE_CASE ( __snake_case : ModelArguments , __snake_case : TrainingArguments ):
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_A = logging.WARNING
if model_args.verbose_logging:
_A = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_A = logging.INFO
logger.setLevel(__snake_case )
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__lowerCAmelCase = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
__lowerCAmelCase = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
__lowerCAmelCase = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowerCAmelCase = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__lowerCAmelCase = field(
default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = "longest"
__lowerCAmelCase = None
__lowerCAmelCase = None
def __call__( self : int, UpperCamelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
_A = self.feature_extractor.pad(
UpperCamelCase__, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
_A = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
_A = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_A = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
_A = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
_A = 1
_A = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
_A = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=UpperCamelCase__, min_masks=2, )
return batch
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any], *UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any]=1, UpperCamelCase__ : List[str]=0, UpperCamelCase__ : int=1.0, **UpperCamelCase__ : Dict ) -> str:
super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
_A = 0
_A = max_gumbel_temp
_A = min_gumbel_temp
_A = gumbel_temp_decay
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : nn.Module, UpperCamelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
_A = self._prepare_inputs(UpperCamelCase__ )
if self.use_amp:
with autocast():
_A = self.compute_loss(UpperCamelCase__, UpperCamelCase__ )
else:
_A = self.compute_loss(UpperCamelCase__, UpperCamelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_A = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_A = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
_A = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCamelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCamelCase__, self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCamelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
return loss.detach()
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_A , _A , _A = parser.parse_args_into_dataclasses()
configure_logger(__snake_case , __snake_case )
# Downloading and loading a dataset from the hub.
_A = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_A = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__snake_case )
def prepare_dataset(__snake_case : str ):
# check that all files have the correct sampling rate
_A , _A = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_A = datasets.map(
__snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
_A = vectorized_datasets.filter(
lambda __snake_case : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__snake_case : str ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_A = vectorized_datasets.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_A = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
_A = WavaVecaForPreTraining(__snake_case )
_A = DataCollatorForWavaVecaPretraining(model=__snake_case , feature_extractor=__snake_case )
_A = WavaVecaPreTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=__snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
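# Decay sketch for the trainer above (not part of the original script): the
# gumbel-softmax temperature follows max(max_gumbel_temp * gumbel_temp_decay ** step,
# min_gumbel_temp), so with the defaults (2.0, 0.999995, 0.5) it anneals from
# 2.0 and clamps at 0.5.
_max_t, _min_t, _decay = 2.0, 0.5, 0.999995
assert max(_max_t * _decay**0, _min_t ) == 2.0
assert max(_max_t * _decay**1_000_000, _min_t ) == 0.5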
| 107 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
    def __init__( self :Optional[Any] , image_size :Dict=2_24 , num_channels :Union[str, Any]=3 , patch_sizes :List[Any]=[7, 3, 3, 3] , strides :Any=[4, 2, 2, 2] , hidden_sizes :Union[str, Any]=[64, 1_28, 3_20, 5_12] , depths :List[Any]=[3, 3, 12, 3] , mlp_ratios :Dict=[8, 8, 4, 4] , hidden_act :int="gelu" , initializer_range :List[Any]=0.02 , layer_norm_eps :List[Any]=1e-6 , layer_scale_init_value :Any=1e-2 , drop_path_rate :int=0.0 , dropout_rate :int=0.0 , **kwargs :Dict , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
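# Stage-geometry sketch for the config above (not part of the original file):
# each VAN stage pairs a patch size with a stride and a hidden size, so the
# defaults describe four downsampling stages.
_van_stages = list(zip([7, 3, 3, 3] , [4, 2, 2, 2] , [64, 1_28, 3_20, 5_12] ) )
assert len(_van_stages ) == 4 and _van_stages[0] == (7, 4, 64)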
| 655 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a: Dict = logging.get_logger(__name__)
__a: Optional[int] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''efficientnet'''
    def __init__( self : Dict , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs : List[str] , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase ( self : Dict ) -> float:
"""simple docstring"""
return 1E-5 | 108 |
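# Compound-scaling sketch for the EfficientNet config above. Assumption: this
# is the standard EfficientNet `round_filters` rule (width scaling snapped to a
# multiple of `depth_divisor`), shown for illustration, not code from this file.
def _round_filters(filters , width_coefficient , depth_divisor=8 ):
    filters *= width_coefficient
    new_filters = max(depth_divisor , int(filters + depth_divisor / 2 ) // depth_divisor * depth_divisor )
    if new_filters < 0.9 * filters:  # never reduce by more than 10%
        new_filters += depth_divisor
    return int(new_filters )
assert _round_filters(32 , 2.0 ) == 64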
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
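# Arithmetic sketch for the "hole" strategy above (not part of the original
# file): when the prompt plus the requested new tokens exceed the model window,
# only the trailing `keep_length` prompt tokens survive
# (inputs["input_ids"][:, -keep_length:]).
_model_max_length, _new_tokens = 1_024, 256
_keep_length = _model_max_length - _new_tokens
assert _keep_length == 768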
| 655 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(__UpperCAmelCase )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __UpperCAmelCase )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = """weight_g"""
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = """weight_v"""
elif "weight" in name:
__SCREAMING_SNAKE_CASE = """weight"""
elif "bias" in name:
__SCREAMING_SNAKE_CASE = """bias"""
else:
__SCREAMING_SNAKE_CASE = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE = name.split(""".""" )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if config_path is not None:
__SCREAMING_SNAKE_CASE = HubertConfig.from_pretrained(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = HubertConfig()
if is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.eos_index
__SCREAMING_SNAKE_CASE = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab.json""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = HubertForCTC(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = HubertModel(__UpperCAmelCase )
if is_finetuned:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_wavavec.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
a = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
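# Renaming sketch mirroring `recursively_load_weights`/MAPPING above (not part
# of the original script): the "*" in a mapped key is replaced by the layer
# index parsed from the fairseq parameter name.
def _rename_demo():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    mapped_key = "encoder.layers.*.attention.k_proj"
    layer_index = name.split("self_attn.k_proj" )[0].split("." )[-2]
    assert mapped_key.replace("*" , layer_index ) == "encoder.layers.3.attention.k_proj"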
| 109 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _readaa( bytestream ):
    # MNIST stores counts and dimensions as big-endian uint32 values.
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(">" )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , "Please use tf.data to implement this functionality." )
def _extract_images( f ):
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , "Please use tf.one_hot on tensors." )
def _dense_to_one_hot( labels_dense , num_classes ):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
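# Worked example for the helper above (a sketch, not in the original module);
# wrapped in a function so it does not run on import.
def _demo_dense_to_one_hot():
    labels = numpy.array([0, 2, 1] )
    one_hot = _dense_to_one_hot(labels , 3 )
    # each row carries a single 1 at the label's column
    assert (one_hot.argmax(axis=1 ) == labels).all()
    return one_hot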
@deprecated(None , "Please use tf.data to implement this functionality." )
def _extract_labels( f , one_hot=False , num_classes=10 ):
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class lowerCAmelCase :
    @deprecated(
        None , "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models." , )
    def __init__( self :List[str] , images :Optional[Any] , labels :Union[str, Any] , fake_data :Tuple=False , one_hot :str=False , dtype :Dict=dtypes.float32 , reshape :Optional[Any]=True , seed :Any=None , ):
        '''simple docstring'''
        seeda , seedb = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
        if fake_data:
            self._num_examples = 1_00_00
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
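

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original module).
    # Assumes network access to the CVDF mirror and a TF1-era environment; the
    # cache directory below is arbitrary.
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = mnist.train.next_batch(64)
    print(images.shape)  # (64, 784) after the default reshape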
"""simple docstring"""
from __future__ import annotations
lowercase_ : Optional[Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _lowerCAmelCase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCamelCase__ ) )
] # the reference grid
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCamelCase__ ) )
] # the action grid
_SCREAMING_SNAKE_CASE : Optional[Any] = init[0]
_SCREAMING_SNAKE_CASE : Optional[int] = init[1]
_SCREAMING_SNAKE_CASE : List[str] = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
_SCREAMING_SNAKE_CASE : int = [[f, g, x, y]]
_SCREAMING_SNAKE_CASE : Dict = False # flag that is set when search is complete
_SCREAMING_SNAKE_CASE : Optional[int] = False # flag set if we can't find expand
while not found and not resign:
if len(lowerCamelCase__ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
_SCREAMING_SNAKE_CASE : Optional[int] = cell.pop()
_SCREAMING_SNAKE_CASE : List[Any] = next_cell[2]
_SCREAMING_SNAKE_CASE : str = next_cell[3]
_SCREAMING_SNAKE_CASE : Union[str, Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
_SCREAMING_SNAKE_CASE : Any = True
else:
for i in range(len(lowerCamelCase__ ) ): # to try out different valid actions
_SCREAMING_SNAKE_CASE : Dict = x + DIRECTIONS[i][0]
_SCREAMING_SNAKE_CASE : List[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowerCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_SCREAMING_SNAKE_CASE : List[Any] = g + cost
_SCREAMING_SNAKE_CASE : Union[str, Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_SCREAMING_SNAKE_CASE : Tuple = 1
_SCREAMING_SNAKE_CASE : Optional[Any] = i
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : str = goal[0]
_SCREAMING_SNAKE_CASE : str = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_SCREAMING_SNAKE_CASE : int = x - DIRECTIONS[action[x][y]][0]
_SCREAMING_SNAKE_CASE : Dict = y - DIRECTIONS[action[x][y]][1]
_SCREAMING_SNAKE_CASE : str = xa
_SCREAMING_SNAKE_CASE : List[str] = ya
invpath.append([x, y] )
_SCREAMING_SNAKE_CASE : str = []
for i in range(len(lowerCamelCase__ ) ):
path.append(invpath[len(lowerCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowercase_ : Tuple = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowercase_ : str = [0, 0]
# all coordinates are given in format [y,x]
lowercase_ : List[str] = [len(grid) - 1, len(grid[0]) - 1]
lowercase_ : str = 1
# the cost map which pushes the path closer to the goal
lowercase_ : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowercase_ : Any = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowercase_ : List[Any] = 99
lowercase_ , lowercase_ : Any = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence and
    returns the head of the Linked List."""
    # if elements_list is empty
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as Head
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas

        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
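

if __name__ == "__main__":
    # Illustrative check added for this write-up: the cosine ("squaredcos_cap_v2")
    # helper above returns a 1-D jnp array of betas clipped at max_beta.
    betas = betas_for_alpha_bar(10)
    print(betas.shape)  # (10,)
    print(float(betas.max()) <= 0.999)  # True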
import random

from .binary_exp_mod import bin_exp_mod


# This is a probabilistic check to test primality, useful for big numbers:
# if it returns False the number is certainly composite; if it returns True
# the number is prime with high probability (prec independent trials).
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
# Second-order IIR (biquad) filter factories.
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
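

if __name__ == "__main__":
    # Smoke test added for illustration; assumes IIRFilter.process(sample)
    # behaves as in audio_filters.iir_filter from the same project.
    filt = make_lowpass(1000, 48000)
    print([round(filt.process(1.0), 6) for _ in range(3)])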
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our BiT structure.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
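
    # Example invocation (added for illustration; the script's filename is an assumption):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./bit-50 --push_to_hub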
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
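
    # Round-trip sanity check (illustrative addition):
    encoded = base64_encode(b"Hello World!")
    assert base64_decode(encoded) == b"Hello World!"
    print(encoded)  # b'SGVsbG8gV29ybGQh'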
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial using Neville's method.
    Returns the approximated value at x0 and the computation table.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
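
    # Illustrative check (added): the points below lie on y = x**2, so the
    # interpolated value at 2.5 is exactly 6.25.
    print(neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0])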
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
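
# Hedged usage sketch (added; the checkpoint id is an assumption and inference
# requires a full diffusers install plus a GPU for reasonable speed):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=ids, num_inference_steps=25).images[0]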
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
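

if __name__ == "__main__":
    # Illustrative call (added): verifies the installed version of a pinned
    # dependency; assumes "tqdm" appears in the deps table.
    dep_version_check("tqdm")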
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCamelCase_ ( lowercase_ ):
"""simple docstring"""
a_ ="""marian"""
a_ =["""past_key_values"""]
a_ ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : int , _a : List[Any]=5_8101 , _a : int=None , _a : Tuple=1024 , _a : int=12 , _a : List[Any]=4096 , _a : Optional[int]=16 , _a : Optional[Any]=12 , _a : Any=4096 , _a : Union[str, Any]=16 , _a : str=0.0 , _a : Dict=0.0 , _a : List[Any]=True , _a : List[Any]=True , _a : Union[str, Any]="gelu" , _a : Tuple=1024 , _a : int=0.1 , _a : Optional[int]=0.0 , _a : Union[str, Any]=0.0 , _a : Union[str, Any]=0.02 , _a : Optional[int]=5_8100 , _a : Dict=False , _a : int=5_8100 , _a : Union[str, Any]=0 , _a : int=0 , _a : str=True , **_a : List[Any] , ) -> List[Any]:
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : Union[str, Any] = decoder_vocab_size or vocab_size
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : Optional[Any] = d_model
__lowerCamelCase : List[str] = encoder_ffn_dim
__lowerCamelCase : List[str] = encoder_layers
__lowerCamelCase : Tuple = encoder_attention_heads
__lowerCamelCase : int = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = decoder_layers
__lowerCamelCase : Optional[Any] = decoder_attention_heads
__lowerCamelCase : List[str] = dropout
__lowerCamelCase : Dict = attention_dropout
__lowerCamelCase : Optional[int] = activation_dropout
__lowerCamelCase : int = activation_function
__lowerCamelCase : List[str] = init_std
__lowerCamelCase : int = encoder_layerdrop
__lowerCamelCase : Dict = decoder_layerdrop
__lowerCamelCase : Any = use_cache
__lowerCamelCase : Tuple = encoder_layers
__lowerCamelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase : Optional[Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class lowerCamelCase_ ( lowercase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _lowercase ( self : Optional[int] ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase : str = {0: 'batch'}
__lowerCamelCase : int = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowerCamelCase : Tuple = {0: 'batch', 1: 'decoder_sequence'}
__lowerCamelCase : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase , __lowerCamelCase = self.num_layers
for i in range(_lowercase ):
__lowerCamelCase : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'}
__lowerCamelCase : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowerCamelCase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _lowercase ( self : Any ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : List[str] = super().outputs
else:
__lowerCamelCase : str = super(_lowercase , self ).outputs
if self.use_past:
__lowerCamelCase , __lowerCamelCase = self.num_layers
for i in range(_lowercase ):
__lowerCamelCase : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__lowerCamelCase : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _lowercase ( self : List[Any] , _a : PreTrainedTokenizer , _a : int = -1 , _a : int = -1 , _a : bool = False , _a : Optional[TensorType] = None , ) -> List[str]:
__lowerCamelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
__lowerCamelCase : Union[str, Any] = seq_length if not self.use_past else 1
__lowerCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
__lowerCamelCase : Dict = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase : List[str] = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCamelCase , __lowerCamelCase = common_inputs['input_ids'].shape
__lowerCamelCase : Optional[Any] = common_inputs['decoder_input_ids'].shape[1]
__lowerCamelCase , __lowerCamelCase = self.num_attention_heads
__lowerCamelCase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Dict = decoder_seq_length + 3
__lowerCamelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase : int = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_lowercase , _lowercase )] , dim=1 )
__lowerCamelCase : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCamelCase , __lowerCamelCase = self.num_layers
__lowerCamelCase : Tuple = min(_lowercase , _lowercase )
__lowerCamelCase : str = max(_lowercase , _lowercase ) - min_num_layers
__lowerCamelCase : List[Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
__lowerCamelCase : Union[str, Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def _lowercase ( self : str , _a : PreTrainedTokenizer , _a : int = -1 , _a : int = -1 , _a : bool = False , _a : Optional[TensorType] = None , ) -> Dict:
__lowerCamelCase : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCamelCase , __lowerCamelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowerCamelCase : Union[str, Any] = seqlen + 2
__lowerCamelCase , __lowerCamelCase = self.num_layers
__lowerCamelCase , __lowerCamelCase = self.num_attention_heads
__lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Dict = common_inputs['attention_mask'].dtype
__lowerCamelCase : Tuple = torch.cat(
[common_inputs['attention_mask'], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
__lowerCamelCase : Tuple = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def _lowercase ( self : Tuple , _a : PreTrainedTokenizer , _a : int = -1 , _a : int = -1 , _a : bool = False , _a : Optional[TensorType] = None , ) -> Any:
__lowerCamelCase : List[Any] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCamelCase : int = tokenizer.num_special_tokens_to_add(_lowercase )
__lowerCamelCase : Optional[int] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
__lowerCamelCase : Optional[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCamelCase : List[str] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def _lowercase ( self : str , _a : PreTrainedTokenizer , _a : int = -1 , _a : int = -1 , _a : bool = False , _a : Optional[TensorType] = None , ) -> str:
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
__lowerCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def _lowercase ( self : Tuple , _a : Tuple , _a : Optional[int] , _a : Dict , _a : Tuple ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : str = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
__lowerCamelCase : str = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
return 1e-4
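A minimal sketch of driving the ONNX config above to build dummy export inputs. Upstream this class is `MarianOnnxConfig`; the import path and the checkpoint id are assumptions:
from transformers import AutoTokenizer, MarianConfig, TensorType
from transformers.models.marian.configuration_marian import MarianOnnxConfig

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
# Dummy tensors for tracing: input_ids/attention_mask plus the decoder_* inputs.
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy_inputs))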
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git_vision_model'
def __init__( self :Dict , _lowercase :Dict=7_68 , _lowercase :Dict=30_72 , _lowercase :Tuple=12 , _lowercase :List[str]=12 , _lowercase :Tuple=3 , _lowercase :Dict=2_24 , _lowercase :Tuple=16 , _lowercase :Optional[int]="quick_gelu" , _lowercase :Union[str, Any]=1e-5 , _lowercase :Tuple=0.0 , _lowercase :Tuple=0.02 , **_lowercase :Optional[Any] , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def UpperCAmelCase ( cls :List[str] , _lowercase :Union[str, os.PathLike] , **_lowercase :Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
lowercase__ , lowercase__ = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
lowercase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git'
def __init__( self :Union[str, Any] , _lowercase :Dict=None , _lowercase :List[str]=3_05_22 , _lowercase :Tuple=7_68 , _lowercase :Any=6 , _lowercase :Dict=12 , _lowercase :Any=30_72 , _lowercase :List[Any]="gelu" , _lowercase :Tuple=0.1 , _lowercase :Optional[int]=0.1 , _lowercase :Optional[Any]=10_24 , _lowercase :Any=0.02 , _lowercase :int=1e-12 , _lowercase :List[Any]=0 , _lowercase :int="absolute" , _lowercase :List[str]=True , _lowercase :Any=False , _lowercase :int=1_01 , _lowercase :str=1_02 , _lowercase :Dict=None , **_lowercase :List[str] , ):
'''simple docstring'''
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , pad_token_id=_lowercase , **_lowercase )
if vision_config is None:
lowercase__ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
lowercase__ = GitVisionConfig(**_lowercase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
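A short sketch of composing the two configs above, mirroring the upstream GitConfig/GitVisionConfig API; the values shown are the documented defaults, used here only for illustration:
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=30522)
# to_dict()/from_dict round-trips the nested vision config, as implemented above.
round_trip = GitConfig.from_dict(config.to_dict())
print(round_trip.vision_config.image_size)  # 224 by default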
| 655 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _snake_case ( __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , __snake_case : Dict , __snake_case : str=True , __snake_case : List[Any]="pt" ):
"""simple docstring"""
_lowerCamelCase : str = {"""add_prefix_space""": True} if isinstance(__snake_case , __snake_case ) and not line.startswith(""" """ ) else {}
_lowerCamelCase : Optional[int] = padding_side
return tokenizer(
[line] , max_length=__snake_case , padding="""max_length""" if pad_to_max_length else None , truncation=__snake_case , return_tensors=__snake_case , add_special_tokens=__snake_case , **__snake_case , )
def _snake_case ( __snake_case : List[str] , __snake_case : str , __snake_case : int=None , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = input_ids.ne(__snake_case ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowercase__ ( lowercase_ ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="train" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , ) -> Tuple:
super().__init__()
_lowerCamelCase : Dict = Path(_lowercase).joinpath(type_path + """.source""")
_lowerCamelCase : List[str] = Path(_lowercase).joinpath(type_path + """.target""")
_lowerCamelCase : List[Any] = self.get_char_lens(self.src_file)
_lowerCamelCase : str = max_source_length
_lowerCamelCase : Optional[int] = max_target_length
assert min(self.src_lens) > 0, F'found empty line in {self.src_file}'
_lowerCamelCase : str = tokenizer
_lowerCamelCase : Optional[int] = prefix
if n_obs is not None:
_lowerCamelCase : Optional[int] = self.src_lens[:n_obs]
_lowerCamelCase : List[Any] = src_lang
_lowerCamelCase : int = tgt_lang
def __len__( self) -> Dict:
return len(self.src_lens)
def __getitem__( self , SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : List[Any] = index + 1 # linecache starts at 1
_lowerCamelCase : Optional[int] = self.prefix + linecache.getline(str(self.src_file) , _lowercase).rstrip("""\n""")
_lowerCamelCase : Dict = linecache.getline(str(self.tgt_file) , _lowercase).rstrip("""\n""")
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowercase):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCamelCase : Optional[int] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowercase) else self.tokenizer
)
_lowerCamelCase : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , _lowercase) else self.tokenizer
_lowerCamelCase : Optional[Any] = encode_line(_lowercase , _lowercase , self.max_source_length , """right""")
_lowerCamelCase : Dict = encode_line(_lowercase , _lowercase , self.max_target_length , """right""")
_lowerCamelCase : str = source_inputs["""input_ids"""].squeeze()
_lowerCamelCase : str = target_inputs["""input_ids"""].squeeze()
_lowerCamelCase : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE) -> Dict:
return [len(_lowercase) for x in Path(_lowercase).open().readlines()]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict:
_lowerCamelCase : Union[str, Any] = torch.stack([x["""input_ids"""] for x in batch])
_lowerCamelCase : str = torch.stack([x["""attention_mask"""] for x in batch])
_lowerCamelCase : str = torch.stack([x["""decoder_input_ids"""] for x in batch])
_lowerCamelCase : List[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowercase)
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Optional[int] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowercase)
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Any = trim_batch(_lowercase , _lowercase)
_lowerCamelCase , _lowerCamelCase = trim_batch(_lowercase , _lowercase , attention_mask=_lowercase)
_lowerCamelCase : int = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
UpperCAmelCase = getLogger(__name__)
def _snake_case ( __snake_case : Union[str, Any] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(__snake_case ) )
def _snake_case ( __snake_case : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = get_git_info()
save_json(__snake_case , os.path.join(__snake_case , """git_log.json""" ) )
def _snake_case ( __snake_case : int , __snake_case : str , __snake_case : Union[str, Any]=4 , **__snake_case : Optional[int] ):
"""simple docstring"""
with open(__snake_case , """w""" ) as f:
json.dump(__snake_case , __snake_case , indent=__snake_case , **__snake_case )
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
with open(__snake_case ) as f:
return json.load(__snake_case )
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : str = git.Repo(search_parent_directories=__snake_case )
_lowerCamelCase : Optional[int] = {
"""repo_id""": str(__snake_case ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def _snake_case ( __snake_case : Union[str, Any] , __snake_case : List[str] ):
"""simple docstring"""
return list(map(__snake_case , __snake_case ) )
def _snake_case ( __snake_case : Union[str, Any] , __snake_case : List[Any] ):
"""simple docstring"""
with open(__snake_case , """wb""" ) as f:
return pickle.dump(__snake_case , __snake_case )
def _snake_case ( __snake_case : List[Any] ):
"""simple docstring"""
def remove_articles(__snake_case : Union[str, Any] ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , __snake_case )
def white_space_fix(__snake_case : Tuple ):
return " ".join(text.split() )
def remove_punc(__snake_case : int ):
_lowerCamelCase : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) )
def _snake_case ( __snake_case : Any , __snake_case : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = normalize_answer(__snake_case ).split()
_lowerCamelCase : List[str] = normalize_answer(__snake_case ).split()
_lowerCamelCase : int = Counter(__snake_case ) & Counter(__snake_case )
_lowerCamelCase : List[Any] = sum(common.values() )
if num_same == 0:
return 0
_lowerCamelCase : Union[str, Any] = 1.0 * num_same / len(__snake_case )
_lowerCamelCase : Union[str, Any] = 1.0 * num_same / len(__snake_case )
_lowerCamelCase : Dict = (2 * precision * recall) / (precision + recall)
return fa
def _snake_case ( __snake_case : Optional[int] , __snake_case : Union[str, Any] ):
"""simple docstring"""
return normalize_answer(__snake_case ) == normalize_answer(__snake_case )
def _snake_case ( __snake_case : int , __snake_case : Any ):
"""simple docstring"""
assert len(__snake_case ) == len(__snake_case )
_lowerCamelCase : List[Any] = 0
for hypo, pred in zip(__snake_case , __snake_case ):
em += exact_match_score(__snake_case , __snake_case )
if len(__snake_case ) > 0:
em /= len(__snake_case )
return {"em": em}
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
return model_prefix.startswith("""rag""" )
def _snake_case ( __snake_case : Tuple , __snake_case : Dict , __snake_case : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCamelCase : int = """dropout_rate"""
for p in extra_params:
if getattr(__snake_case , __snake_case , __snake_case ):
if not hasattr(__snake_case , __snake_case ) and not hasattr(__snake_case , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(__snake_case ) )
delattr(__snake_case , __snake_case )
continue
_lowerCamelCase : List[Any] = p if hasattr(__snake_case , __snake_case ) else equivalent_param[p]
setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) )
delattr(__snake_case , __snake_case )
return hparams, config
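A quick sanity check of the evaluation helpers above. The obfuscated definitions all collide on the name `_snake_case`, so the names used here are the ones the functions call internally (`normalize_answer`, `exact_match_score`); treat them as the upstream names:
print(normalize_answer("The Cat!"))          # strips case, punctuation, articles -> "cat"
print(exact_match_score("Paris!", "paris"))  # True: both sides normalize to "paris"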
| 88 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
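A compact sketch of the PT<->TF round-trip these tests cover; the tiny checkpoint id and the temporary path are illustrative placeholders:
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True)
tf_model.save_pretrained("/tmp/tiny-bert-tf")  # writes tf_model.h5
pt_model = AutoModel.from_pretrained("/tmp/tiny-bert-tf", from_tf=True)
print(type(pt_model).__name__)  # BertModel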
| 655 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
A_ : Union[str, Any] = 'CompVis/stable-diffusion-v1-1'
A_ : Tuple = 'CompVis/stable-diffusion-v1-2'
A_ : Any = 'CompVis/stable-diffusion-v1-3'
A_ : Optional[Any] = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ):
super().__init__()
UpperCamelCase_: int = StableDiffusionPipeline.from_pretrained(_lowercase )
UpperCamelCase_: Optional[Any] = StableDiffusionPipeline.from_pretrained(_lowercase )
UpperCamelCase_: Optional[int] = StableDiffusionPipeline.from_pretrained(_lowercase )
UpperCamelCase_: Optional[int] = StableDiffusionPipeline(
vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , requires_safety_checker=_lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _a ( self ):
return {k: getattr(self , _lowercase ) for k in self.config.keys() if not k.startswith('_' )}
def _a ( self , _lowerCamelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_: Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def _a ( self ):
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def _a ( self , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def _a ( self , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def _a ( self , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def _a ( self , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def _a ( self , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
UpperCamelCase_: List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(_lowercase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase_: List[str] = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase_: Optional[int] = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase_: int = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase_: Any = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 57 |
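A hypothetical driver for the four-checkpoint comparison pipeline defined above, loaded as a diffusers community pipeline (the `custom_pipeline` id matches the upstream community example; prompt and dtype are illustrative):
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
)
pipe.to("cuda")
# Returns one image per checkpoint v1.1-v1.4, in order.
images = pipe(prompt="a photo of an astronaut riding a horse").images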
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
def _A ( __magic_name__ ):
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
lowercase__ = value
else:
lowercase__ = value
return new_state_dict
def _A ( __magic_name__ ):
lowercase__ = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowercase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase__ = in_proj_weight_cross_attn[:256, :]
lowercase__ = in_proj_bias_cross_attn[:256]
lowercase__ = in_proj_weight_cross_attn[256:512, :]
lowercase__ = in_proj_bias_cross_attn[256:512]
lowercase__ = in_proj_weight_cross_attn[-256:, :]
lowercase__ = in_proj_bias_cross_attn[-256:]
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ , lowercase__ = image.size
lowercase__ = max(__magic_name__ , __magic_name__ )
lowercase__ = 800 if "detection" in checkpoint_url else 1000
lowercase__ = target_max_size / current_max_size
lowercase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _A ( __magic_name__ ):
lowercase__ = F.to_tensor(__magic_name__ )
lowercase__ = F.normalize(__magic_name__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: "table", 1: "table rotated"}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
else:
lowercase__ = 125
lowercase__ = 6
lowercase__ = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
lowercase__ = TableTransformerForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify our conversion
lowercase__ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__magic_name__ )
lowercase__ = Image.open(__magic_name__ ).convert("RGB" )
lowercase__ = normalize(resize(__magic_name__ , __magic_name__ ) ).unsqueeze(0 )
lowercase__ = model(__magic_name__ )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
lowercase__ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
lowercase__ = (1, 125, 7)
lowercase__ = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
lowercase__ = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__magic_name__ )
image_processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
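The script above is driven from the command line and writes a standard transformers checkpoint; a sketch of using it (the script filename and dump path are assumptions):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection
from transformers import DetrImageProcessor, TableTransformerForObjectDetection

model = TableTransformerForObjectDetection.from_pretrained("./table-transformer-detection")
processor = DetrImageProcessor.from_pretrained("./table-transformer-detection")
print(model.config.id2label)  # {0: 'table', 1: 'table rotated'} for the detection checkpoint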
| 655 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A__ : Union[str, Any] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n'
def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=8 ) -> List[str]:
lowerCamelCase_ : Tuple =height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCamelCase_ : Tuple =width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowercase__ ( lowercase_ ):
def __init__( self : List[str] , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowerCamelCase_ : Union[str, Any] =2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str ):
if latents is None:
lowerCamelCase_ : List[Any] =randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ : Optional[Any] =latents.to(_lowercase )
lowerCamelCase_ : Any =latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase__ ( self : int , snake_case__ : int=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ : Any =torch.device(F"""cuda:{gpu_id}""" )
lowerCamelCase_ : Any =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Tuple=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowerCamelCase_ : Union[str, Any] =torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ : Tuple =None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowerCamelCase_ : Tuple =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : Optional[int] ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self : int , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 100 , snake_case__ : float = 4.0 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
lowerCamelCase_ : Tuple =self._execution_device
lowerCamelCase_ : Union[str, Any] =guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowerCamelCase_ : str =torch.cat(_lowercase , dim=0 )
lowerCamelCase_ : int =image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowerCamelCase_ : Optional[Any] =torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ : Tuple =image_embeds.repeat_interleave(_lowercase , dim=0 )
lowerCamelCase_ : int =negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowerCamelCase_ : Tuple =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowerCamelCase_ : Optional[Any] =self.scheduler.timesteps
lowerCamelCase_ : Optional[Any] =self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ : List[Any] =downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ : Optional[Any] =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : Tuple =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : Tuple ={"image_embeds": image_embeds}
lowerCamelCase_ : str =self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ : Any =noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ : List[str] =variance_pred.chunk(2 )
lowerCamelCase_ : int =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ : int =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ : str =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[str] =self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowerCamelCase_ : Optional[int] =self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ : List[Any] =image * 0.5 + 0.5
lowerCamelCase_ : List[Any] =image.clamp(0 , 1 )
lowerCamelCase_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ : Union[str, Any] =self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 153 |
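# The __call__ loop above implements classifier-free guidance: the latents are
# duplicated, the UNet predicts noise for the unconditional and conditional
# halves, and the two halves are recombined. A minimal sketch of that
# recombination step follows; the helper name `apply_cfg` and the tensor shapes
# are illustrative assumptions, not part of the pipeline API.
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional and conditional predictions along the
    # batch dimension, mirroring the duplicated latents in the loop above.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # Push the prediction away from the unconditional direction.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


guided = apply_cfg(torch.randn(4, 4, 64, 64), guidance_scale=4.0)
print(guided.shape)  # torch.Size([2, 4, 64, 64])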
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 0 |
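# The ByT5 __init__ above wires its submodules through _LazyModule so the heavy
# imports only run on first attribute access. A simplified stand-in illustrating
# the idea follows; this LazyModule is a sketch, not the real
# transformers._LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: resolve exported names to their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._class_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        # Import the defining submodule lazily, then forward the lookup.
        module = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(module, attr)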
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 572 |
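# A small usage example for the backtracking solver above, using the fixed
# solve_maze/run_maze names: 0 marks an open cell, 1 a blocked cell, and the
# path must run from the top-left corner to the bottom-right corner.
maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 1, 0],
]
solve_maze(maze)  # prints the 0/1 solution grid and returns True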
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self :int , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 194 |
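# A quick sanity check for the probabilistic primality test above (assuming the
# fixed is_prime_big name and a working bin_exp_mod): compare it against naive
# trial division on small integers, where both must agree.
def is_prime_naive(n: int) -> bool:
    # Deterministic trial division, used only as a reference.
    if n < 2:
        return False
    return all(n % k != 0 for k in range(2, int(n**0.5) + 1))


assert all(is_prime_big(n) == is_prime_naive(n) for n in range(2, 500))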
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class snake_case_ ( lowercase_ ,unittest.TestCase ):
__lowerCAmelCase = MaMaaaTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = True
def snake_case_ ( self ):
super().setUp()
a_ : Optional[int] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
a_ : Optional[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
a_ : Optional[Any] = Path(self.tmpdirname )
save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
a_ : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self , **a_ ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def snake_case_ ( self , a_ ):
return (
"This is a test",
"This is a test",
)
def snake_case_ ( self ):
a_ : int = "</s>"
a_ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def snake_case_ ( self ):
a_ : Union[str, Any] = self.get_tokenizer()
a_ : List[str] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(_lowercase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
a_ : Optional[Any] = self.get_tokenizer()
a_ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [2, 3, 4, 5, 6] , )
a_ : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
a_ : Tuple = tokenizer.convert_tokens_to_string(_lowercase )
self.assertEqual(_lowercase , "This is a test" )
@slow
def snake_case_ ( self ):
a_ : Optional[int] = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
__lowerCAmelCase = "facebook/m2m100_418M"
__lowerCAmelCase = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
__lowerCAmelCase = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L\'affaire NSA souligne l\'absence totale de débat sur le renseignement",
]
# fmt: off
__lowerCAmelCase = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def snake_case_ ( cls ):
a_ : Tuple = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
a_ : Optional[int] = 1
return cls
def snake_case_ ( self ):
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def snake_case_ ( self ):
a_ : str = self.tokenizer.get_vocab()
self.assertEqual(len(_lowercase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , _lowercase )
def snake_case_ ( self ):
a_ : Optional[Any] = "en"
a_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def snake_case_ ( self ):
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
# fmt: off
a_ : int = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
a_ : Tuple = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
a_ : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def snake_case_ ( self ):
a_ : List[Any] = tempfile.mkdtemp()
a_ : List[Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_lowercase )
a_ : List[Any] = MaMaaaTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.lang_token_to_id , _lowercase )
@require_torch
def snake_case_ ( self ):
a_ : Union[str, Any] = "en"
a_ : str = "fr"
a_ : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors="pt" )
a_ : List[Any] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
a_ : Optional[int] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def snake_case_ ( self ):
a_ : int = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
a_ : Union[str, Any] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def snake_case_ ( self ):
a_ : List[Any] = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
a_ : Dict = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def snake_case_ ( self ):
a_ : List[str] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , ) | 237 |
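# The integration test above runs batch["labels"] through shift_tokens_right to
# build decoder inputs. A hedged sketch of what such a helper typically does
# follows; this is an illustration, not the exact transformers implementation.
import torch


def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()  # shift every label one step right
    shifted[:, 0] = decoder_start_token_id  # decoding starts from this token
    shifted.masked_fill_(shifted == -100, pad_token_id)  # replace loss-mask sentinels
    return shifted


labels = torch.tensor([[7, 8, 9, 2]])
print(shift_tokens_right_sketch(labels, pad_token_id=1, decoder_start_token_id=2))
# tensor([[2, 7, 8, 9]])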
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 655 | 0 |
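# The "fixed_small" branch of _get_variance above computes the DDPM posterior
# variance beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t. A standalone
# numeric sketch follows; the linear beta schedule is an assumption matching the
# scheduler's default config values.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 0.02, 1000)  # linear schedule over 1000 train steps
alphas_cumprod = jnp.cumprod(1.0 - betas)
t = 500
variance = (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]
variance = jnp.clip(variance, a_min=1e-20)  # the "fixed_small" clamp
print(float(variance))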
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 284 |
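# The try/except blocks above follow the optional-dependency pattern: probe each
# backend once at import time and register the heavy symbols only when it is
# present. A minimal sketch of the same idea; require_backend is a hypothetical
# helper, not part of the transformers API.
try:
    import torch  # noqa: F401

    _torch_available = True
except ImportError:
    _torch_available = False


def require_backend(symbol):
    # Mirror OptionalDependencyNotAvailable: fail loudly only when the symbol is used.
    if not _torch_available:
        raise ImportError(f"{symbol} requires PyTorch; run `pip install torch`.")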
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowercase_ )} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__lowerCamelCase = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__lowerCamelCase = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__lowerCamelCase = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__lowerCamelCase = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'train'
__lowerCamelCase = 'dev'
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self :Optional[Any] , _lowercase :SquadDataTrainingArguments , _lowercase :PreTrainedTokenizer , _lowercase :Optional[int] = None , _lowercase :Union[str, Split] = Split.train , _lowercase :Optional[bool] = False , _lowercase :Optional[str] = None , _lowercase :Optional[str] = "pt" , ):
'''simple docstring'''
lowercase__ = args
lowercase__ = is_language_sensitive
        lowercase__ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_lowercase , _lowercase ):
try:
lowercase__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowercase__ = mode
# Load data features from cache or dataset file
lowercase__ = "v2" if args.version_2_with_negative else "v1"
lowercase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + ".lock"
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
lowercase__ = time.time()
lowercase__ = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase__ = self.old_features["features"]
lowercase__ = self.old_features.get("dataset" , _lowercase )
lowercase__ = self.old_features.get("examples" , _lowercase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
lowercase__ = self.processor.get_dev_examples(args.data_dir )
else:
lowercase__ = self.processor.get_train_examples(args.data_dir )
lowercase__ , lowercase__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
lowercase__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self :Dict ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :Any , _lowercase :Any ):
'''simple docstring'''
lowercase__ = self.features[i]
lowercase__ = torch.tensor(feature.input_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowercase__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.cls_index , dtype=torch.long )
lowercase__ = torch.tensor(feature.p_mask , dtype=torch.float )
lowercase__ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowercase__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowercase__ = torch.tensor(feature.start_position , dtype=torch.long )
lowercase__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 655 | 0 |
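# The dataset __init__ above guards its feature cache with a FileLock so that,
# under distributed training, exactly one process builds the cache while the
# others block and then load it. A condensed sketch of the pattern; the cache
# path is a placeholder.
import os

import torch
from filelock import FileLock

cache_file = "/tmp/features_cache.pt"  # placeholder path
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        payload = torch.load(cache_file)  # every other process just loads
    else:
        payload = {"features": list(range(10))}  # stand-in for the expensive build
        torch.save(payload, cache_file)  # the first process writes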
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : str=1_8 , SCREAMING_SNAKE_CASE__ : int=3_0 , SCREAMING_SNAKE_CASE__ : List[str]=4_0_0 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__a = size if size is not None else {"""shortest_edge""": 1_8}
__a = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
def __a ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
a_ :Optional[Any] =LevitImageProcessor if is_vision_available() else None
def __a ( self : str ):
'''simple docstring'''
__a = LevitImageProcessingTester(self )
@property
def __a ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def __a ( self : Tuple ):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
__a = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def __a ( self : Any ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __a ( self : Tuple ):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __a ( self : List[str] ):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 582 |
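# A usage sketch mirroring the shape assertions in the tests above: with
# shortest_edge=18 and an 18x18 center crop, every output batch has shape
# (N, 3, 18, 18). The constructor keyword arguments follow the dict convention
# used by the tests; treat the exact values as assumptions.
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray((np.random.rand(40, 30, 3) * 255).astype(np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])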
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ["input_ids"]
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = RESOURCE_FILES_NAMES
def __init__( self :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=False , _lowercase :Dict="utf8" , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :List[str]="[PAD]" , _lowercase :Dict="[CLS]" , _lowercase :Optional[Any]="[MASK]" , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , vocab_file=_lowercase , encoding=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
lowercase__ = do_lower_case
lowercase__ = sentencepiece_model_ckpt
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase__ = self.load_vocab(filepath=_lowercase )
else:
lowercase__ = {self.sp_model.id_to_piece(_lowercase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase__ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
| 655 | 0 |
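# A plain-Python illustration of the sequence-pair layout built by
# build_inputs_with_special_tokens and the token-type method above:
# [CLS] A [SEP] [SEP] B [SEP], with type id 0 covering "[CLS] A" and 1 covering
# the rest. The ids 1 and 2 for [CLS]/[SEP] are placeholder values.
def pair_layout(tokens_a, tokens_b, cls_id=1, sep_id=2):
    input_ids = [cls_id] + tokens_a + [sep_id, sep_id] + tokens_b + [sep_id]
    token_type_ids = [0] * (len(tokens_a) + 1) + [1] * (len(tokens_b) + 3)
    return input_ids, token_type_ids


print(pair_layout([10, 11], [20]))
# ([1, 10, 11, 2, 2, 20, 2], [0, 0, 0, 1, 1, 1, 1])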
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
 | 458 |
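# Quick usage sketch (added): instantiating the config defined above with its
# defaults and checking the derived attributes.
if __name__ == "__main__":
    cfg = MaskFormerSwinConfig()
    assert cfg.hidden_size == 96 * 2**3  # embed_dim doubles after each of the 4 stages
    assert cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]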
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n=100):
    # Numerators of the continued-fraction convergents of e: the partial
    # quotients are [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...].
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
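    # Sanity check (added): the 10th convergent of e has numerator 1457, whose
    # digit sum is 17 -- a cheap regression test for solution() above.
    assert sum_digits(1457) == 17
    assert solution(10) == 17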
| 655 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json") ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin") ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
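# Tiny check (added; not in the original script): a uniform distribution over k
# outcomes has entropy ln(k), so k = 4 should give ~1.3863.
_uniform = torch.full((4,), 0.25)
assert torch.isclose(entropy(_uniform), torch.log(torch.tensor(4.0)))
del _uniform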
def print_2d_tensor(tensor):
    """Print a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and gradient-based importance scores."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads iteratively by importance until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune the masked heads and compare score/speed against the masked model."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
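# Example invocation (added; the script filename and paths are placeholders):
# the script expects a text file of token ids via --data_dir and writes
# head_mask.npy plus the pruned checkpoint into --output_dir, e.g.:
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1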
| 275 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    '''simple docstring'''

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        '''simple docstring'''
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        '''simple docstring'''
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        '''simple docstring'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f'''{prompt_key}_{key}''' ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'''{prompt_key}_{key}.npy''')

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/') , voice_preset_paths[key])}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        '''simple docstring'''
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''')

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        '''simple docstring'''
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
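# Hedged usage sketch (added; "suno/bark-small" and the preset name are assumed
# checkpoint conventions, and constructing the processor downloads files):
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    # "input_ids" feeds the text model; "history_prompt" carries the validated
    # semantic/coarse/fine speaker arrays.
    print(sorted(inputs.keys()))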
| 655 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """simple docstring"""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class TaLayerSelfAttentionCond(nn.Module):
    """simple docstring"""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre self attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class TaLayerCrossAttention(nn.Module):
    """simple docstring"""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class TaLayerFFCond(nn.Module):
    """simple docstring"""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    """simple docstring"""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    """simple docstring"""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMS norm: only scales, never shifts
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """simple docstring"""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    """simple docstring"""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
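# Minimal check (added): FiLM conditioning predicts a per-channel (scale, shift)
# pair from the conditioning embedding and applies x * (1 + scale) + shift,
# broadcasting over the sequence dimension.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 10, 4)
    cond = torch.randn(2, 1, 8)
    assert film(x, cond).shape == x.shape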
| 459 |
import math
import random


def sigmoid_function(value, deriv=False):
    """Sigmoid, or its derivative when `deriv` is True and `value` is a sigmoid output."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    """Train a single weight so the neuron's output approaches expected / 100."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
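    # Quick property check (added): the result is a sigmoid activation scaled by
    # 100, so it lands strictly inside (0, 100) regardless of convergence.
    assert 0 < forward_propagation(50, 1000) < 100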
| 655 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
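# Hedged usage sketch (added; requires torchaudio at runtime): a one-second dummy
# waveform at the extractor's default 16 kHz rate yields 80-dim log-mel banks.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    speech = np.random.randn(16000).astype(np.float32)
    out = extractor(speech, sampling_rate=16000, return_tensors="np")
    print(out["input_features"].shape)  # (1, num_frames, 80)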
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
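# Quick usage sketch (added): the config above defaults to the van-base layout,
# with one entry per stage in each of the per-stage lists.
if __name__ == "__main__":
    cfg = VanConfig()
    assert len(cfg.hidden_sizes) == len(cfg.depths) == len(cfg.strides) == 4
    print(cfg.model_type, cfg.hidden_sizes)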
| 655 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
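# Quick check (added): get_pairs() enumerates adjacent symbol bigrams, which are
# the merge candidates ranked by the BPE table in the tokenizer below.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}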
class BlenderbotTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''')
        return input_ids
| 57 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
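# Added note: prompt removal happens in *decoded text* space -- the decoded
# prompt's character length is measured once, then FULL_TEXT re-attaches the
# original prompt string while NEW_TEXT keeps only text[prompt_length:]; this
# stays consistent even when the tokenizer does not round-trip the prompt
# byte-for-byte.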
| 655 | 0 |
"""simple docstring"""
import numpy as np
import datasets
A__ : List[str] = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
A__ : Optional[int] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
A__ : Dict = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
lowerCamelCase_ : Dict =np.array(_lowercase )
lowerCamelCase_ : Dict =np.array(_lowercase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
lowerCamelCase_ : Optional[Any] =X - np.mean(_lowercase )
lowerCamelCase_ : Optional[int] =np.cov(reference_distribution.T )
try:
lowerCamelCase_ : Dict =np.linalg.inv(_lowercase )
except np.linalg.LinAlgError:
lowerCamelCase_ : Optional[Any] =np.linalg.pinv(_lowercase )
lowerCamelCase_ : List[Any] =np.dot(_lowercase , _lowercase )
lowerCamelCase_ : Union[str, Any] =np.dot(_lowercase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 153 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _A ( __magic_name__ ):
lowercase__ = numpy.dtype(numpy.uint32 ).newbyteorder(">" )  # MNIST headers are big-endian uint32
return numpy.frombuffer(bytestream.read(4 ) , dtype=__magic_name__ )[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uint8 )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
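# A standalone sketch (added for illustration; `_demo_one_hot` is not part of
# the original file) of the flat-index trick used above: row i of the one-hot
# matrix starts at flat position i * num_classes, so adding the label gives the
# flat position of the single 1 to write.
def _demo_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes  # flat start of each row
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
# e.g. _demo_one_hot(numpy.array([2, 0]), 3) -> [[0., 0., 1.], [1., 0., 0.]]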
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uint8 )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class lowerCAmelCase :
@deprecated(
_lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple=False , _lowercase :str=False , _lowercase :Dict=dtypes.float32 , _lowercase :Optional[Any]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ , lowercase__ = random_seed.get_seed(_lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seedb )
lowercase__ = dtypes.as_dtype(_lowercase ).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowercase__ = 1_00_00
lowercase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowercase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ = images.astype(numpy.float32 )
lowercase__ = numpy.multiply(_lowercase , 1.0 / 255.0 )
lowercase__ = images
lowercase__ = labels
lowercase__ = 0
lowercase__ = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
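# Worked example (added): with _num_examples = 100, batch_size = 32 and
# _index_in_epoch = 96, the epoch-boundary branch fires: the last 4 examples of
# the current (shuffled) epoch are concatenated with the first 28 of the freshly
# reshuffled next epoch, so every batch keeps its full size across boundaries.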
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
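# Minimal usage sketch (added; identifiers are mangled in this dump -- the
# loader above corresponds to the original `read_data_sets` and each split is a
# `_DataSet`):
#
#     datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#     images, labels = datasets.train.next_batch(32)  # (32, 784) and (32, 10)
#
# "/tmp/mnist_data" is a placeholder download/cache directory.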
| 655 | 0 |
"""simple docstring"""
from collections import deque
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = process_name # process name
_SCREAMING_SNAKE_CASE : int = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_SCREAMING_SNAKE_CASE : str = arrival_time
_SCREAMING_SNAKE_CASE : Tuple = burst_time # remaining burst time
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # total time the process waits in the ready queue
_SCREAMING_SNAKE_CASE : Dict = 0 # time from arrival time to completion time
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = number_of_queues
# time slice of queues that round robin algorithm applied
_SCREAMING_SNAKE_CASE : Any = time_slices
# unfinished process is in this ready_queue
_SCREAMING_SNAKE_CASE : Optional[int] = queue
# current time
_SCREAMING_SNAKE_CASE : Any = current_time
# finished process is in this sequence queue
_SCREAMING_SNAKE_CASE : Optional[Any] = deque()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return [q.burst_time for q in queue]
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
_SCREAMING_SNAKE_CASE : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_SCREAMING_SNAKE_CASE : int = 0
# set the process's turnaround time because it is finished
_SCREAMING_SNAKE_CASE : Any = self.current_time - cp.arrival_time
# set the completion time
_SCREAMING_SNAKE_CASE : Optional[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = deque() # sequence deque of terminated process
# run each process for one round-robin cycle; unfinished processes go back to the queue
for _ in range(len(_lowercase ) ):
_SCREAMING_SNAKE_CASE : List[str] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_SCREAMING_SNAKE_CASE : List[Any] = self.current_time
# put the process at the back of the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_SCREAMING_SNAKE_CASE : Any = 0
# set the finish time
_SCREAMING_SNAKE_CASE : Tuple = self.current_time
# update the process' turnaround time because it is finished
_SCREAMING_SNAKE_CASE : Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
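# Trace (added for illustration) for the example below (P1..P4, slices [17, 25]):
# queue 0 (RR, slice 17) completes P2 (burst 17) and partially runs P1/P3/P4;
# queue 1 (RR, slice 25) completes P4; the final FCFS stage drains P1 then P3,
# so the finish sequence is ['P2', 'P4', 'P1', 'P3'].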
if __name__ == "__main__":
import doctest
lowercase_ : Union[str, Any] = Process('''P1''', 0, 53)
lowercase_ : Any = Process('''P2''', 0, 17)
lowercase_ : Any = Process('''P3''', 0, 68)
lowercase_ : Tuple = Process('''P4''', 0, 24)
lowercase_ : int = 3
lowercase_ : Optional[Any] = [17, 25]
lowercase_ : List[Any] = deque([P1, P2, P3, P4])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
lowercase_ : str = Process('''P1''', 0, 53)
lowercase_ : Optional[int] = Process('''P2''', 0, 17)
lowercase_ : str = Process('''P3''', 0, 68)
lowercase_ : int = Process('''P4''', 0, 24)
lowercase_ : str = 3
lowercase_ : Dict = [17, 25]
lowercase_ : Any = deque([P1, P2, P3, P4])
lowercase_ : str = MLFQ(number_of_queues, time_slices, queue, 0)
lowercase_ : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\n\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\n\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\n\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\n {mlfq.calculate_sequence_of_finish_queue()}'
)
| 572 |
from __future__ import annotations
class lowerCAmelCase :
def __init__( self :Union[str, Any] , _lowercase :List[Any]=None ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __repr__( self :Dict ):
'''simple docstring'''
lowercase__ = []
lowercase__ = self
while temp:
string_rep.append(f'''{temp.data}''' )
lowercase__ = temp.next
return "->".join(_lowercase )
def _A ( __magic_name__ ):
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ = lowercase__ = Node(elements_list[0] )
for i in range(1 , len(__magic_name__ ) ):
lowercase__ = Node(elements_list[i] )
lowercase__ = current.next
return head
def _A ( __magic_name__ ):
if head_node is not None and isinstance(__magic_name__ , __magic_name__ ):
print_reverse(head_node.next )
print(head_node.data )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(__magic_name__ )
print("Elements in Reverse:" )
print_reverse(__magic_name__ )
if __name__ == "__main__":
main()
| 655 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Tuple = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class __UpperCamelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : str = '''autoformer'''
_lowercase : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "student_t" , SCREAMING_SNAKE_CASE = "nll" , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 6_4 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 3_2 , SCREAMING_SNAKE_CASE = 3_2 , SCREAMING_SNAKE_CASE = "gelu" , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 1_0_0 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE = 1_0 , SCREAMING_SNAKE_CASE = 2_5 , SCREAMING_SNAKE_CASE = 3 , **SCREAMING_SNAKE_CASE , ) -> str:
a__ = prediction_length
a__ = context_length if context_length is not None else prediction_length
a__ = distribution_output
a__ = loss
a__ = input_size
a__ = num_time_features
a__ = lags_sequence
a__ = scaling
a__ = num_dynamic_real_features
a__ = num_static_real_features
a__ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
a__ = cardinality
else:
a__ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
a__ = embedding_dimension
else:
a__ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
a__ = num_parallel_samples
# Transformer architecture configuration
a__ = input_size * len(self.lags_sequence ) + self._number_of_features
a__ = d_model
a__ = encoder_attention_heads
a__ = decoder_attention_heads
a__ = encoder_ffn_dim
a__ = decoder_ffn_dim
a__ = encoder_layers
a__ = decoder_layers
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = encoder_layerdrop
a__ = decoder_layerdrop
a__ = activation_function
a__ = init_std
a__ = use_cache
# Autoformer
a__ = label_length
a__ = moving_average
a__ = autocorrelation_factor
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def _UpperCAmelCase ( self ) -> Any:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
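# Worked example (added): with the defaults above (input_size = 1, lags_sequence
# = [1, ..., 7], no time/dynamic/static features), _number_of_features is just
# the 2 loc/scale features, so feature_size = 1 * 7 + 2 = 9 -- the width of each
# input vector before the d_model projection.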
| 194 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
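# Illustrative check (added; in this mangled dump the function above is named
# `_A` but corresponds to the original `is_prime_big`): 561 = 3 * 11 * 17 is a
# Carmichael number, so it fools the plain Fermat test for every coprime base,
# yet the squaring-chain loop above still rejects it with overwhelming
# probability (each round catches a composite with probability >= 3/4).
def _demo_miller_rabin():
    assert not _A(561)  # composite Carmichael number is rejected
    assert _A(97)  # a genuine prime passes every round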
if __name__ == "__main__":
_snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> List[Any]:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_00, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""") | 237 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
'''simple docstring'''
import os
import sys
import unittest
_UpperCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCamelCase : int = os.path.join(git_repo_path, "src", "transformers")
_UpperCamelCase : str = "\n{0} = None\n"
_UpperCamelCase : Optional[int] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_UpperCamelCase : List[str] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = find_backend(' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")' )
self.assertIsNone(_lowercase )
lowerCAmelCase = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(_lowercase , 'tokenizers' )
lowerCAmelCase = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(_lowercase , 'tensorflow_text' )
lowerCAmelCase = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(_lowercase , 'sentencepiece_and_tokenizers' )
lowerCAmelCase = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(_lowercase , 'sentencepiece_and_tensorflow_text' )
lowerCAmelCase = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(_lowercase , 'sentencepiece_and_tokenizers_and_vision' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , _lowercase )
self.assertIn('tensorflow_text' , _lowercase )
self.assertIn('sentencepiece_and_tokenizers' , _lowercase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(_lowercase , '\nCONSTANT = None\n' )
lowerCAmelCase = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
_lowercase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
lowerCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
lowerCAmelCase = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n'
lowerCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , _lowercase )
| 284 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowercase__ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
lowercase__ = model(_lowercase )["last_hidden_state"]
lowercase__ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase_ ):
"""simple docstring"""
a_ :Optional[int] ="""AutoTokenizer"""
a_ :List[str] =["""tokenizer"""]
a_ :str ={
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
__a = speaker_embeddings
@classmethod
def __a ( cls : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str="speaker_embeddings_path.json" , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__a = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("""subfolder""" , _lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , _lowercase ) , force_download=kwargs.pop("""force_download""" , _lowercase ) , proxies=kwargs.pop("""proxies""" , _lowercase ) , resume_download=kwargs.pop("""resume_download""" , _lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , _lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _lowercase ) , revision=kwargs.pop("""revision""" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exist,
no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
__a = None
else:
with open(_lowercase ) as speaker_embeddings_json:
__a = json.load(_lowercase )
else:
__a = None
__a = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def __a ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]="speaker_embeddings_path.json" , SCREAMING_SNAKE_CASE__ : Any="speaker_embeddings" , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , """v2""" ) , exist_ok=_lowercase )
__a = {}
__a = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__a = self._load_voice_preset(_lowercase )
__a = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
__a = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
__a = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , """w""" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def __a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a = self.speaker_embeddings[voice_preset]
__a = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
__a = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , _lowercase ) , force_download=kwargs.pop("""force_download""" , _lowercase ) , proxies=kwargs.pop("""proxies""" , _lowercase ) , resume_download=kwargs.pop("""resume_download""" , _lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , _lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _lowercase ) , revision=kwargs.pop("""revision""" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
__a = np.load(_lowercase )
return voice_preset_dict
def __a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]="pt" , SCREAMING_SNAKE_CASE__ : List[Any]=2_5_6 , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__a = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(""".npz""" ):
__a = voice_preset + """.npz"""
__a = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
__a = BatchFeature(data=_lowercase , tensor_type=_lowercase )
__a = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="""max_length""" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
__a = voice_preset
return encoded_text
| 582 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(binary_stream ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(binary_stream ) , 8 )
]
return bytes(__magic_name__ )
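# Round-trip note (added; both functions in this file are mangled to `_A`, the
# encoder first and the decoder second): b"abc" is 24 bits -> four full 6-bit
# groups -> b"YWJj" with no '=' padding, while b"ab" (16 bits) is padded with
# two zero bits to 18 bits and yields b"YWI=" with a single '='.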
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase_ = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def __magic_name__ ( lowercase , lowercase , lowercase ) -> Any:
"""simple docstring"""
lowercase_ : Optional[Any] = SavedModel()
lowercase_ : Optional[int] = []
with open(os.path.join(lowercase , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
lowercase_ : str = json.load(lowercase )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase )] )
with open(lowercase , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
lowercase_ : List[Any] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowercase_ : List[Any] = sorted(lowercase )
lowercase_ : Dict = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase )
if strict and len(lowercase ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(lowercase ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
UpperCAmelCase_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 458 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Dict , _lowercase :TransformeraDModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , _lowercase :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create a imagenet -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ = int(_lowercase )
lowercase__ = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.float32 if is_mps else torch.float64
else:
lowercase__ = torch.int32 if is_mps else torch.int64
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
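# Added note: the guidance branch above is classifier-free guidance applied to
# the epsilon channels only: eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond).
# The batch is doubled so the conditional (real labels) and unconditional (null
# label 1000) predictions come out of one transformer forward pass.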
| 655 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase_ : Dict = set()
# edges = list of graph's edges
lowerCAmelCase_ : List[Any] = get_edges(A__ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
lowerCAmelCase_, lowerCAmelCase_ : int = edges.pop()
chosen_vertices.add(A__ )
chosen_vertices.add(A__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(A__ )
return chosen_vertices
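# Added note: taking *both* endpoints of each arbitrarily picked edge makes this
# the classic maximal-matching heuristic -- any optimal cover must contain at
# least one endpoint of every matched edge, so the result is at most twice the
# optimum (a 2-approximation).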
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
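# Illustrative note (not part of the original tests): FeaturesManager.determine_framework
# resolves "pt" or "tf" from an explicit argument, a local checkpoint's files, or the
# installed backends, e.g. FeaturesManager.determine_framework("bert-base-cased").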
| 655 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
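    # A quick oscillator sanity check (a sketch added for illustration): the blinker
    # has period 2, so applying new_generation twice returns the original pattern.
    assert new_generation(new_generation(BLINKER)) == BLINKER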
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
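# Instantiation sketch (illustrative, not part of the original module): a default
# GitConfig always carries a fully initialized vision sub-config.
#   configuration = GitConfig()
#   assert isinstance(configuration.vision_config, GitVisionConfig)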
| 655 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
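# Usage sketch (assuming requests, bs4 and fake_useragent are installed):
#   python google_search.py "hugging face transformers"
# fetches the Google results page for the query and opens the first organic hit
# in the default browser.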
| 88 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
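# Usage sketch (illustrative, not part of the original script): rename_key mutates the
# state dict in place, while rename_backbone_keys returns a fresh OrderedDict, e.g.
#   rename_key(state_dict, "input_proj.weight", "input_projection.weight")
#   state_dict = rename_backbone_keys(state_dict)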
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : str =RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Optional[int] =RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Dict =RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[int] =RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Dict =RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Optional[int] =RoCBertBasicTokenizer(do_lower_case=_lowercase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Tuple =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase_ : Dict ={}
for i, token in enumerate(_lowercase ):
lowerCamelCase_ : Any =i
lowerCamelCase_ : List[Any] =RoCBertWordpieceTokenizer(vocab=_lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCAmelCase__ ( self : Optional[int] ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCAmelCase__ ( self : int ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Tuple =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
lowerCamelCase_ : Union[str, Any] =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def UpperCAmelCase__ ( self : Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
lowerCamelCase_ : List[Any] =F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ : Optional[Any] =tokenizer_r.encode_plus(
_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , )
lowerCamelCase_ : List[str] =tokenizer_r.do_lower_case if hasattr(_lowercase , "do_lower_case" ) else False
lowerCamelCase_ : Union[str, Any] =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : List[Any] =["的", "人", "有"]
lowerCamelCase_ : str ="".join(_lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : List[Any] =True
lowerCamelCase_ : List[Any] =self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
lowerCamelCase_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
lowerCamelCase_ : Dict =tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
lowerCamelCase_ : Dict =tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
lowerCamelCase_ : Union[str, Any] =tokenizer_r.convert_ids_to_tokens(_lowercase )
lowerCamelCase_ : str =tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
lowerCamelCase_ : List[str] =False
lowerCamelCase_ : Tuple =self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
lowerCamelCase_ : int =self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
lowerCamelCase_ : Optional[Any] =tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
lowerCamelCase_ : str =tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
lowerCamelCase_ : Optional[int] =tokenizer_r.convert_ids_to_tokens(_lowercase )
lowerCamelCase_ : Dict =tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ : Tuple =[
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_lowercase )
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Optional[Any] =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ : Union[str, Any] =tokenizer.encode("你好" , add_special_tokens=_lowercase )
lowerCamelCase_ : Tuple =tokenizer.encode("你是谁" , add_special_tokens=_lowercase )
lowerCamelCase_ : Optional[int] =tokenizer.build_inputs_with_special_tokens(_lowercase )
lowerCamelCase_ : Optional[Any] =tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Union[str, Any] =self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Any ="你好,你是谁"
lowerCamelCase_ : Optional[int] =tokenizer.tokenize(_lowercase )
lowerCamelCase_ : List[str] =tokenizer.convert_tokens_to_ids(_lowercase )
lowerCamelCase_ : int =tokenizer.convert_tokens_to_shape_ids(_lowercase )
lowerCamelCase_ : Optional[Any] =tokenizer.convert_tokens_to_pronunciation_ids(_lowercase )
lowerCamelCase_ : Optional[Any] =tokenizer.prepare_for_model(
_lowercase , _lowercase , _lowercase , add_special_tokens=_lowercase )
lowerCamelCase_ : Optional[int] =tokenizer.encode_plus(_lowercase , add_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
| 153 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 655 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 572 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
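# Worked example (illustrative): with the default scale_factor of 8, a 768x768 request
# maps to latent dimensions (96, 96) because 768 // 8**2 == 12 exactly, while a 500x500
# request rounds 500 // 64 == 7 up to 8 and therefore yields (64, 64), the latent size
# of a 512x512 image.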
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 655 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
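# Illustrative note: `arg_to_scheduler_choices` is the set of values accepted by the
# `--lr_scheduler` command-line flag, e.g. `--lr_scheduler cosine` selects
# get_cosine_schedule_with_warmup.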
class BaseTransformer(pl.LightningModule):
    """A generic PyTorch Lightning wrapper around a Hugging Face transformer."""

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
def _UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a__ = self.model_type.from_pretrained(*_lowercase , **_lowercase )
def _UpperCAmelCase ( self ) -> int:
a__ = arg_to_scheduler[self.hparams.lr_scheduler]
a__ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a__ = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint):
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning versions, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether the model parameters are updated or not
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            " See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
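# A minimal sketch of the intended wiring (hypothetical subclass and arguments;
# mirrors how the research-project scripts in transformers drive this module):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())  # mutates parser in place
#   args = parser.parse_args(["--model_name_or_path", "facebook/rag-token-base", "--do_train"])
#   model = MyTaskTransformer(args)  # hypothetical subclass implementing get_dataloader()
#   trainer = generic_train(model, args)
#
# Note that InitCallback is always appended here, so this generic_train variant is
# specific to RAG models (it touches pl_module.model.rag on sanity check start).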
| 194 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # map module-safe backend names to their pip package names
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"

                    assert backend in deps, f"{backend} is not in the deps table!"
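# For reference, a sketch of the pattern the test above relies on (assumed from
# diffusers' generated dummy objects): every placeholder class for a missing
# backend records its pip requirements in a `_backends` class attribute, e.g.
#
#   class StableDiffusionKDiffusionPipeline(metaclass=DummyObject):
#       _backends = ["torch", "transformers", "k_diffusion"]
#
# The test maps each backend string to its pip name and checks that it appears
# in the pinned dependency table.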
| 655 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a_ : Union[str, Any] = [p / w for p, w in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
a_ : Dict = sorted(SCREAMING_SNAKE_CASE__ )
# declaring useful variables
a_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
a_ : Any = 0
a_ : List[str] = 0
a_ : List[Any] = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a_ : Dict = sorted_profit_by_weight[length - i - 1]
a_ : List[str] = profit_by_weight.index(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
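# Worked example: profit=[10, 20, 30], weight=[3, 4, 5], max_weight=6.
# The profit/weight ratios are [3.33, 5.0, 6.0]; the greedy pass takes all of the
# third item (weight 5, profit 30) and then 1/4 of the second item
# (20 * 1/4 = 5), so calc_profit([10, 20, 30], [3, 4, 5], 6) returns 35.0.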
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
SCREAMING_SNAKE_CASE_ = [int(x) for x in input("""Input profits separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_ = [int(x) for x in input("""Input weights separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_ = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight) | 237 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPM) scheduler, Flax version.
    All mutable values live in an explicit `DDPMSchedulerState` pytree.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`"
                " or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
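# A minimal denoising-loop sketch (assumed model call and shapes; mirrors how
# other Flax schedulers in diffusers are driven):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   rng = jax.random.PRNGKey(0)
#   for t in state.timesteps:
#       model_output = unet_apply(sample, t)  # hypothetical model call
#       sample = scheduler.step(state, model_output, t, sample, key=rng).prev_sample
#
# Keeping all mutable values in the explicit `state` pytree (rather than on
# `self`) is what makes the scheduler usable inside jax.jit-compiled loops.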
| 655 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
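    # The two comments above mark compositions that were never implemented here;
    # a minimal sketch (assuming skfuzzy's relation helpers fuzz.relation_min,
    # fuzz.maxmin_composition and fuzz.maxprod_composition are available in the
    # installed version):
    #
    #   R = fuzz.relation_min(young, middle_aged)   # fuzzy relation R via min
    #   S = fuzz.relation_min(middle_aged, young)   # fuzzy relation S via min
    #   T_maxmin = fuzz.maxmin_composition(R, S)    # max-min composition R o S
    #   T_maxprod = fuzz.maxprod_composition(R, S)  # max-product composition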
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 284 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
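# A minimal usage sketch (hypothetical local paths; assumes SQuAD v1 json files
# in data_dir, as expected by SquadV1Processor):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids / positions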
| 655 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __a ( cls : Union[str, Any] ):
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def __a ( cls : Optional[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def __a ( self : List[str] ):
'''simple docstring'''
__a = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
__a = FlaxBertModel(_lowercase )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_lowercase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_lowercase , repo_id="""test-model-flax""" , push_to_hub=_lowercase , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_lowercase , 1E-3 , msg=f'''{key} not identical''' )
def __a ( self : str ):
'''simple docstring'''
__a = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
__a = FlaxBertModel(_lowercase )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_lowercase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_lowercase , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=_lowercase , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_lowercase , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal(model_1, model_2) -> bool:
    """Return True if every flattened parameter of the two Flax models matches."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self : int ):
'''simple docstring'''
__a = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__a = FlaxBertModel(_lowercase )
__a = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_lowercase , _lowercase ) )
with self.assertRaises(_lowercase ):
__a = FlaxBertModel.from_pretrained(_lowercase )
__a = FlaxBertModel.from_pretrained(_lowercase , subfolder=_lowercase )
self.assertTrue(check_models_equal(_lowercase , _lowercase ) )
def __a ( self : List[str] ):
'''simple docstring'''
__a = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__a = FlaxBertModel(_lowercase )
__a = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_lowercase , _lowercase ) , max_shard_size="""10KB""" )
with self.assertRaises(_lowercase ):
__a = FlaxBertModel.from_pretrained(_lowercase )
__a = FlaxBertModel.from_pretrained(_lowercase , subfolder=_lowercase )
self.assertTrue(check_models_equal(_lowercase , _lowercase ) )
def __a ( self : Tuple ):
'''simple docstring'''
__a = """bert"""
__a = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(_lowercase ):
__a = FlaxBertModel.from_pretrained(_lowercase )
__a = FlaxBertModel.from_pretrained(_lowercase , subfolder=_lowercase )
self.assertIsNotNone(_lowercase )
def __a ( self : Optional[Any] ):
'''simple docstring'''
__a = """bert"""
__a = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(_lowercase ):
__a = FlaxBertModel.from_pretrained(_lowercase )
__a = FlaxBertModel.from_pretrained(_lowercase , subfolder=_lowercase )
self.assertIsNotNone(_lowercase )
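# Note on the sharding test above: save_pretrained(..., max_shard_size="10KB")
# splits the msgpack weights into numbered shard files plus an index json that
# from_pretrained uses to reassemble the parameters; the tiny shard size simply
# forces sharding even for this tiny model. (Exact shard file names are an
# implementation detail of the transformers version in use.)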
| 582 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = """▁"""
_snake_case = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_snake_case = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_snake_case = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_snake_case = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
_snake_case = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase ( PreTrainedTokenizer ):
    # Standard tokenizer class attributes, consumed by the PreTrainedTokenizer
    # base class by name (this is the ERNIE-M tokenizer from transformers).
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=False , _lowercase :Dict="utf8" , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :List[str]="[PAD]" , _lowercase :Dict="[CLS]" , _lowercase :Optional[Any]="[MASK]" , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , vocab_file=_lowercase , encoding=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
lowercase__ = do_lower_case
lowercase__ = sentencepiece_model_ckpt
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase__ = self.load_vocab(filepath=_lowercase )
else:
lowercase__ = {self.sp_model.id_to_piece(_lowercase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase__ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
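# A minimal usage sketch. In the transformers source this class is
# ErnieMTokenizer (the class name above is an obfuscated placeholder); assuming a
# downloaded sentencepiece checkpoint and vocab file:
#
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#   )
#   ids_a = tokenizer("multilingual ERNIE-M input text")["input_ids"]
#   # [CLS] A [SEP] [SEP] B [SEP] layout for sentence pairs (ids_b hypothetical):
#   pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)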
| 655 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    __a : Optional[Any] = KandinskyV22Pipeline
__a : Optional[Any] = [
"""image_embeds""",
"""negative_image_embeds""",
]
__a : int = ["""image_embeds""", """negative_image_embeds"""]
__a : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__a : Optional[int] = False
@property
def snake_case__ ( self ) -> Any:
"""simple docstring"""
return 32
@property
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
return 32
@property
def snake_case__ ( self ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def snake_case__ ( self ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
return 1_00
@property
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : List[str] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        lowercase_ : List[str] = UNet2DConditionModel(**_lowercase )
return model
@property
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case__ ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
lowercase_ : str = self.dummy_unet
lowercase_ : List[Any] = self.dummy_movq
lowercase_ : Optional[Any] = DDIMScheduler(
num_train_timesteps=10_00, beta_schedule="""linear""", beta_start=0.00085, beta_end=0.012, clip_sample=_lowercase, set_alpha_to_one=_lowercase, steps_offset=1, prediction_type="""epsilon""", thresholding=_lowercase, )
lowercase_ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case__ ( self, snake_case__, snake_case__=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_lowercase ) ).to(_lowercase )
lowercase_ : str = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
_lowercase )
if str(_lowercase ).startswith("""mps""" ):
lowercase_ : List[Any] = torch.manual_seed(_lowercase )
else:
lowercase_ : Any = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Dict = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case__ ( self ) -> str:
"""simple docstring"""
lowercase_ : str = """cpu"""
lowercase_ : Dict = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**_lowercase )
lowercase_ : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Optional[Any] = pipe(**self.get_dummy_inputs(_lowercase ) )
lowercase_ : Dict = output.images
lowercase_ : List[Any] = pipe(
**self.get_dummy_inputs(_lowercase ), return_dict=_lowercase, )[0]
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
lowercase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Union[str, Any] = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> int:
"""simple docstring"""
lowercase_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
        lowercase_ : List[Any] = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16 )
pipe_prior.to(_lowercase )
        lowercase_ : Optional[int] = KandinskyV22Pipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""", torch_dtype=torch.float16 )
lowercase_ : List[str] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
lowercase_ : int = """red cat, 4k photo"""
lowercase_ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowercase_ , lowercase_ : int = pipe_prior(
_lowercase, generator=_lowercase, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
lowercase_ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowercase_ : Any = pipeline(
image_embeds=_lowercase, negative_image_embeds=_lowercase, generator=_lowercase, num_inference_steps=1_00, output_type="""np""", )
lowercase_ : str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(_lowercase, _lowercase ) | 458 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every
    # third partial quotient (i % 3 == 0) equals 2 * i / 3, all others equal 1.
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
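# Sanity check from the Project Euler 65 statement: the 10th convergent of e is
# 1457/536, whose numerator has digit sum 1 + 4 + 5 + 7 = 17, so solution(10) == 17.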
| 655 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
set_seed(770)
__A : List[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
__A : Any = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
__A : Any = os.path.dirname(os.path.abspath(__file__))
__A : List[Any] = os.path.join(os.path.expanduser("~"), ".cache")
__A : List[Any] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def UpperCamelCase_ ( A__ : int , A__ : List[str]=False ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = model_type
if use_small:
key += "_small"
return os.path.join(A__ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Optional[Any] ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
hf_hub_download(repo_id=A__ , filename=A__ , local_dir=A__ )
def UpperCamelCase_ ( A__ : str , A__ : Union[str, Any] , A__ : Any=False , A__ : str="text" ):
'''simple docstring'''
if model_type == "text":
lowerCAmelCase_ : Any = BarkSemanticModel
lowerCAmelCase_ : int = BarkSemanticConfig
lowerCAmelCase_ : Tuple = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase_ : Any = BarkCoarseModel
lowerCAmelCase_ : int = BarkCoarseConfig
lowerCAmelCase_ : Dict = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase_ : str = BarkFineModel
lowerCAmelCase_ : Dict = BarkFineConfig
lowerCAmelCase_ : Any = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCAmelCase_ : Union[str, Any] = f'{model_type}_small' if use_small else model_type
lowerCAmelCase_ : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A__ ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowerCAmelCase_ : Union[str, Any] = torch.load(A__ , map_location=A__ )
# this is a hack
lowerCAmelCase_ : Tuple = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowerCAmelCase_ : Optional[Any] = model_args["""vocab_size"""]
lowerCAmelCase_ : Tuple = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCAmelCase_ : Optional[int] = model_args.pop("""n_head""" )
lowerCAmelCase_ : Dict = model_args.pop("""n_embd""" )
lowerCAmelCase_ : Any = model_args.pop("""n_layer""" )
lowerCAmelCase_ : str = ConfigClass(**checkpoint["""model_args"""] )
lowerCAmelCase_ : List[Any] = ModelClass(config=A__ )
lowerCAmelCase_ : Optional[int] = GenerationConfigClass()
lowerCAmelCase_ : Any = model_generation_config
lowerCAmelCase_ : Union[str, Any] = checkpoint["""model"""]
# fixup checkpoint
lowerCAmelCase_ : Optional[Any] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(A__ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase_ : int = k[len(A__ ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase_ : Any = new_k.replace(A__ , new_layer_name_dict[old_layer_name] )
lowerCAmelCase_ : List[str] = state_dict.pop(A__ )
lowerCAmelCase_ : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase_ : Any = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowerCAmelCase_ : List[str] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase_ : Union[str, Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(A__ ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(A__ ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(A__ , strict=A__ )
lowerCAmelCase_ : str = model.num_parameters(exclude_embeddings=A__ )
lowerCAmelCase_ : str = checkpoint["""best_val_loss"""].item()
logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(A__ , 3 )} loss' )
model.eval()
model.to(A__ )
del checkpoint, state_dict
return model
def UpperCamelCase_ ( A__ : List[str] , A__ : Any=False , A__ : Optional[int]="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase_ : Dict = """cpu""" # do conversion on cpu
lowerCAmelCase_ : Optional[Any] = _get_ckpt_path(A__ , use_small=A__ )
lowerCAmelCase_ : Optional[Any] = _load_model(A__ , A__ , model_type=A__ , use_small=A__ )
# load bark initial model
lowerCAmelCase_ : Tuple = _bark_load_model(A__ , """cpu""" , model_type=A__ , use_small=A__ )
if model_type == "text":
lowerCAmelCase_ : Tuple = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=A__ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
lowerCAmelCase_ : Any = 5
lowerCAmelCase_ : Tuple = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase_ : List[Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
lowerCAmelCase_ : List[str] = bark_model(A__ )[0]
lowerCAmelCase_ : int = model(A__ )
# take last logits
lowerCAmelCase_ : List[Any] = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : int = 8
lowerCAmelCase_ : Optional[Any] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCAmelCase_ : List[Any] = model(A__ , A__ )
lowerCAmelCase_ : Tuple = bark_model(A__ , A__ )
lowerCAmelCase_ : Optional[int] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
def UpperCamelCase_ ( A__ : Tuple , A__ : int , A__ : Any , A__ : Any , A__ : Optional[int] , A__ : Tuple , ):
'''simple docstring'''
lowerCAmelCase_ : Any = os.path.join(A__ , A__ )
lowerCAmelCase_ : Optional[Any] = BarkSemanticConfig.from_pretrained(os.path.join(A__ , """config.json""" ) )
lowerCAmelCase_ : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(A__ , """config.json""" ) )
lowerCAmelCase_ : List[str] = BarkFineConfig.from_pretrained(os.path.join(A__ , """config.json""" ) )
lowerCAmelCase_ : Optional[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowerCAmelCase_ : Optional[int] = BarkSemanticModel.from_pretrained(A__ )
lowerCAmelCase_ : Union[str, Any] = BarkCoarseModel.from_pretrained(A__ )
lowerCAmelCase_ : List[str] = BarkFineModel.from_pretrained(A__ )
lowerCAmelCase_ : Optional[Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowerCAmelCase_ : List[str] = BarkConfig.from_sub_model_configs(
A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Dict = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase_ : int = BarkModel(A__ )
lowerCAmelCase_ : Optional[int] = semantic
lowerCAmelCase_ : str = coarseAcoustic
lowerCAmelCase_ : Dict = fineAcoustic
lowerCAmelCase_ : Tuple = codec
lowerCAmelCase_ : Any = bark_generation_config
Path(A__ ).mkdir(exist_ok=A__ )
bark.save_pretrained(A__ , repo_id=A__ , push_to_hub=A__ )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
__A : Any = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 275 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
lowercase__ = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    f'''`{os.path.join(_lowercase , _lowercase )}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
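# A minimal usage sketch (the upstream class name BarkProcessor and the checkpoint /
# voice preset ids below are assumptions, shown for illustration only):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")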
| 655 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a_ ( _lowerCAmelCase ) -> List[str]:
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' ,set() )
@pytest.fixture
def a_ ( _lowerCAmelCase ) -> List[Any]:
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _a : Dict ) -> Dict:
__lowerCamelCase : Optional[Any] = metric_id
class lowerCamelCase_ :
"""simple docstring"""
a_ =[MetricMock(lowercase_ ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def _lowercase ( self : Tuple ) -> Optional[Any]:
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' ,HfhMock() )
@pytest.mark.parametrize(
'func, args' ,[(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> Optional[Any]:
if "tmp_path" in args:
__lowerCamelCase : List[Any] = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(_lowerCAmelCase ,match='https://huggingface.co/docs/evaluate' ):
func(*_lowerCAmelCase )
| 459 |
import math
import random
def _A ( __magic_name__ , __magic_name__ = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
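# e.g. sigmoid_function(0) == 0.5; with deriv=True the argument is treated as an
# already-activated output, so sigmoid_function(0.5, deriv=True) == 0.25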
# Initial Value
_snake_case = 0.02
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__magic_name__ ):
# Forward propagation
lowercase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase__ = (expected / 100) - layer_a
# Error delta
lowercase__ = layer_1_error * sigmoid_function(__magic_name__ , __magic_name__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 | 0 |
"""simple docstring"""
import argparse
import json
import subprocess
def _snake_case ( __snake_case : int , __snake_case : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = []
_lowerCamelCase : Any = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCamelCase : int = subprocess.run(__snake_case , shell=__snake_case , stdout=subprocess.PIPE )
_lowerCamelCase : Any = output.stdout.decode("""utf-8""" )
_lowerCamelCase : Optional[int] = json.loads(__snake_case )
_lowerCamelCase : str = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__snake_case )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(__snake_case ) )
if len(__snake_case ) > 0:
_lowerCamelCase : Tuple = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def _snake_case ( __snake_case : Any ):
"""simple docstring"""
return values.split(""",""" )
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
UpperCAmelCase = parser.parse_args()
get_runner_status(args.target_runners, args.token)
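# Example invocation (script name and token are placeholders):
#   python get_runner_status.py --target_runners runner-a,runner-b --token <GITHUB_TOKEN>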
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
def __init__( self :Optional[Any] , _lowercase :Dict=2_24 , _lowercase :Union[str, Any]=3 , _lowercase :List[Any]=[7, 3, 3, 3] , _lowercase :Any=[4, 2, 2, 2] , _lowercase :Union[str, Any]=[64, 1_28, 3_20, 5_12] , _lowercase :List[Any]=[3, 3, 12, 3] , _lowercase :Dict=[8, 8, 4, 4] , _lowercase :int="gelu" , _lowercase :List[Any]=0.02 , _lowercase :List[Any]=1e-6 , _lowercase :Any=1e-2 , _lowercase :int=0.0 , _lowercase :int=0.0 , **_lowercase :Dict , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = mlp_ratios
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = dropout_rate
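        # instantiating this config with no arguments presumably mirrors the
        # Visual-Attention-Network/van-base hyper-parameters referenced above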
| 655 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : Union[str, Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase( lowercase_ ):
"""simple docstring"""
a : List[Any] ='''wavlm'''
def __init__( self , _lowerCamelCase=3_2 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=1_2_8 , _lowerCamelCase=1_6 , _lowerCamelCase=3_2_0 , _lowerCamelCase=8_0_0 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.0_5 , _lowerCamelCase=1_0 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=1_0 , _lowerCamelCase=3_2_0 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=1_0_0 , _lowerCamelCase=2_5_6 , _lowerCamelCase=2_5_6 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=2_5_6 , _lowerCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCamelCase=(5, 3, 3, 1, 1) , _lowerCamelCase=(1, 2, 3, 1, 1) , _lowerCamelCase=5_1_2 , _lowerCamelCase=8_0 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: Optional[Any] = feat_extract_norm
UpperCamelCase_: Union[str, Any] = feat_extract_activation
UpperCamelCase_: str = list(_lowercase )
UpperCamelCase_: Optional[Any] = list(_lowercase )
UpperCamelCase_: Dict = list(_lowercase )
UpperCamelCase_: Union[str, Any] = conv_bias
UpperCamelCase_: str = num_buckets
UpperCamelCase_: str = max_bucket_distance
UpperCamelCase_: Dict = num_conv_pos_embeddings
UpperCamelCase_: Any = num_conv_pos_embedding_groups
UpperCamelCase_: List[str] = len(self.conv_dim )
UpperCamelCase_: Optional[int] = num_hidden_layers
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: Optional[Any] = hidden_act
UpperCamelCase_: List[str] = num_attention_heads
UpperCamelCase_: List[Any] = hidden_dropout
UpperCamelCase_: Optional[int] = attention_dropout
UpperCamelCase_: Any = activation_dropout
UpperCamelCase_: str = feat_proj_dropout
UpperCamelCase_: List[Any] = final_dropout
UpperCamelCase_: Optional[int] = layerdrop
UpperCamelCase_: Optional[int] = layer_norm_eps
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: Any = num_ctc_classes
UpperCamelCase_: Any = vocab_size
UpperCamelCase_: List[Any] = do_stable_layer_norm
UpperCamelCase_: List[Any] = use_weighted_layer_sum
UpperCamelCase_: Tuple = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase_: Union[str, Any] = apply_spec_augment
UpperCamelCase_: Dict = mask_time_prob
UpperCamelCase_: str = mask_time_length
UpperCamelCase_: List[Any] = mask_time_min_masks
UpperCamelCase_: Dict = mask_feature_prob
UpperCamelCase_: str = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCamelCase_: Any = num_codevectors_per_group
UpperCamelCase_: Union[str, Any] = num_codevector_groups
UpperCamelCase_: List[Any] = contrastive_logits_temperature
UpperCamelCase_: List[str] = num_negatives
UpperCamelCase_: str = codevector_dim
UpperCamelCase_: Tuple = proj_codevector_dim
UpperCamelCase_: Dict = diversity_loss_weight
# ctc loss
UpperCamelCase_: Dict = ctc_loss_reduction
UpperCamelCase_: Optional[int] = ctc_zero_infinity
# adapter
UpperCamelCase_: Optional[Any] = add_adapter
UpperCamelCase_: Optional[Any] = adapter_kernel_size
UpperCamelCase_: Union[str, Any] = adapter_stride
UpperCamelCase_: Union[str, Any] = num_adapter_layers
UpperCamelCase_: Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase_: List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase_: int = list(_lowercase )
UpperCamelCase_: Optional[Any] = list(_lowercase )
UpperCamelCase_: Optional[Any] = list(_lowercase )
UpperCamelCase_: List[str] = xvector_output_dim
@property
def _a ( self ):
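        # the product of the conv strides is the feature-extractor downsampling ratio:
        # with the default conv_stride (5, 2, 2, 2, 2, 2, 2) this reduce computes
        # 5 * 2**6 = 320 input samples per output frame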
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 57 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
                    f'''{handle_long_generation} is not a valid value for the `handle_long_generation` parameter;'''
                    " expected one of [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
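            # "hole" truncates the prompt from the left so that prompt + new tokens fit
            # the model: e.g. with model_max_length=1024 and max_new_tokens=100, only
            # the last 924 prompt tokens are kept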
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
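        # e.g. with num_return_sequences=3 and a batch of 2 prompts, out_b is 6 and the
        # reshape below groups the output as (2, 3, sequence_length)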
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
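# A minimal usage sketch of this pipeline (the checkpoint name is an assumption):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20))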
| 655 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 153 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _A ( __magic_name__ ):
lowercase__ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__magic_name__ )[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
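    # e.g. labels_dense=[0, 2] with num_classes=3 maps to flat indices [0, 5], so the
    # one-hot matrix becomes [[1, 0, 0], [0, 0, 1]] once those positions are set to 1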
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
lowercase__ = 1
return labels_one_hot
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class lowerCAmelCase :
@deprecated(
_lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple=False , _lowercase :str=False , _lowercase :Dict=dtypes.floataa , _lowercase :Optional[Any]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ , lowercase__ = random_seed.get_seed(_lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ = dtypes.as_dtype(_lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowercase__ = 1_00_00
lowercase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowercase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ = images.astype(numpy.floataa )
lowercase__ = numpy.multiply(_lowercase , 1.0 / 255.0 )
lowercase__ = images
lowercase__ = labels
lowercase__ = 0
lowercase__ = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase :
def __init__( self , snake_case__=None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = data
_SCREAMING_SNAKE_CASE : Optional[int] = None
def __repr__( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = []
_SCREAMING_SNAKE_CASE : Union[str, Any] = self
while temp:
string_rep.append(F'''{temp.data}''' )
_SCREAMING_SNAKE_CASE : Any = temp.next
return "->".join(_lowercase )
def _lowerCAmelCase ( lowerCamelCase__ : List[str] ) -> List[str]:
if not elements_list:
raise Exception("The Elements List is empty" )
_SCREAMING_SNAKE_CASE : int = Node(elements_list[0] )
for i in range(1, len(lowerCamelCase__ ) ):
_SCREAMING_SNAKE_CASE : Optional[Any] = Node(elements_list[i] )
_SCREAMING_SNAKE_CASE : Optional[int] = current.next
return head
def _lowerCAmelCase ( lowerCamelCase__ : Dict ) -> int:
if head_node is not None and isinstance(lowerCamelCase__, lowerCamelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def _lowerCAmelCase ( ) -> List[Any]:
from doctest import testmod
testmod()
_SCREAMING_SNAKE_CASE : Any = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print("Linked List:" )
print(lowerCamelCase__ )
print("Elements in Reverse:" )
print_reverse(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 572 |
from __future__ import annotations
class lowerCAmelCase :
def __init__( self :Union[str, Any] , _lowercase :List[Any]=None ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __repr__( self :Dict ):
'''simple docstring'''
lowercase__ = []
lowercase__ = self
while temp:
string_rep.append(f'''{temp.data}''' )
lowercase__ = temp.next
return "->".join(_lowercase )
def _A ( __magic_name__ ):
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ = lowercase__ = Node(elements_list[0] )
for i in range(1 , len(__magic_name__ ) ):
lowercase__ = Node(elements_list[i] )
lowercase__ = current.next
return head
def _A ( __magic_name__ ):
if head_node is not None and isinstance(__magic_name__ , __magic_name__ ):
print_reverse(head_node.next )
print(head_node.data )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(__magic_name__ )
print("Elements in Reverse:" )
print_reverse(__magic_name__ )
if __name__ == "__main__":
main()
| 655 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : Tuple = logging.getLogger(__name__)
a_ : str = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
"""simple docstring"""
_lowercase : Any = field(
default=lowercase_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowercase : Union[str, Any] = field(
default=lowercase_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowercase_ )} , )
_lowercase : int = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[int] = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : List[str] = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __UpperCamelCase :
"""simple docstring"""
_lowercase : List[Any] = field(
default=lowercase_ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Union[str, Any] = field(
default=lowercase_ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowercase : Optional[Any] = field(
default=lowercase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[Any] = field(
default=lowercase_ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowercase : List[str] = field(
default=lowercase_ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowercase : str = field(
default=lowercase_ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowercase : Any = field(
default=lowercase_ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
    _lowercase : Optional[Any] = field(default=lowercase_ , metadata={'''help''': '''Whether or not to use whole word mask.'''} )
_lowercase : Tuple = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : Any = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowercase : str = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowercase : List[Any] = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowercase : Any = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
def _dataset(__UpperCAmelCase , __UpperCAmelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=__UpperCAmelCase , file_path=__UpperCAmelCase , block_size=args.block_size , ref_path=__UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=__UpperCAmelCase , file_path=__UpperCAmelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__UpperCAmelCase , file_path=__UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__UpperCAmelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def __a ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
a__ , a__ , a__ = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
a__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
a__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
a__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
a__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
a__ = AutoModelWithLMHead.from_config(__UpperCAmelCase )
model.resize_token_embeddings(len(__UpperCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
a__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
a__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
a__ = (
get_dataset(__UpperCAmelCase , tokenizer=__UpperCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
a__ = (
get_dataset(__UpperCAmelCase , tokenizer=__UpperCAmelCase , evaluate=__UpperCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
a__ = DataCollatorForPermutationLanguageModeling(
tokenizer=__UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
a__ = DataCollatorForWholeWordMask(
tokenizer=__UpperCAmelCase , mlm_probability=data_args.mlm_probability )
else:
a__ = DataCollatorForLanguageModeling(
tokenizer=__UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
a__ = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , data_collator=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , prediction_loss_only=__UpperCAmelCase , )
# Training
if training_args.do_train:
a__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__UpperCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
a__ = trainer.evaluate()
a__ = math.exp(eval_output['''eval_loss'''] )
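        # perplexity is exp(eval_loss): e.g. an eval loss of 2.0 gives e**2 ≈ 7.39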
a__ = {'''perplexity''': perplexity}
a__ = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(__UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , __UpperCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(__UpperCAmelCase )
return results
def __a ( __UpperCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 194 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
exp += 1
    # n - 1 = d * (2**exp)
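    # e.g. for n = 13 the loop yields d = 3 and exp = 2, since 12 = 3 * (2**2)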
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Build and cache features for the train/dev/test splits."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load the cached features for `mode` and wrap them in a DataLoader."""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
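
# A hypothetical invocation of this script; `--data_dir`, `--model_name_or_path`
# and `--output_dir` are assumed to come from `add_generic_args` /
# `BaseTransformer.add_model_specific_args` and are not defined in this file:
#
#   python run_ner.py \
#       --task_type NER \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --output_dir ./ner-model \
#       --do_predict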
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE_ = NERTransformer.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = NERTransformer(args)
SCREAMING_SNAKE_CASE_ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
SCREAMING_SNAKE_CASE_ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model) | 237 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
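    # Both tests above exercise the same serialisation contract: run the
    # pipeline, save it, reload it, rerun with identical inputs, and require
    # the outputs to match to 1e-4. A condensed sketch of that contract
    # (variable names are illustrative):
    #
    #   out_a = pipe(**inputs)[0]
    #   with tempfile.TemporaryDirectory() as tmpdir:
    #       pipe.save_pretrained(tmpdir)
    #       pipe_b = pipe.__class__.from_pretrained(tmpdir)
    #   out_b = pipe_b(**inputs)[0]
    #   assert np.abs(to_np(out_a) - to_np(out_b)).max() < 1e-4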
| 655 | 0 |
"""Overwrite the expected-value lines of failing tests, given a corrections file."""
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:  # the statement may span several lines; skip until it closes
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
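
# How the script above is meant to be driven (file names are illustrative).
# Each line of --correct_filename must hold four ";"-separated fields:
# file path, class name, test name, and the corrected source line, e.g.
#
#   tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference;expected_slice = torch.tensor([...])
#
# Invocation, optionally restricted to the tests listed in a failures file:
#
#   python overwrite_expected_slices.py --correct_filename corrected.txt --fail_filename failures.txt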
| 284 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 655 | 0 |
"""Task template for text classification datasets."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # copy the template and pull the ClassLabel names in from the dataset's features
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
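
# A minimal usage sketch for the template above, assuming the reconstructed
# names `TextClassification` / `align_with_features` (the ClassLabel names are
# illustrative):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification().align_with_features(features)
#   assert template.column_mapping == {"text": "text", "labels": "labels"}
#   assert template.label_schema["labels"].names == ["neg", "pos"]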
| 582 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
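
# Round-trip check for the functions above; the expected values match the
# standard library's base64 module:
#
#   >>> base64_encode(b"Hello World!")
#   b'SGVsbG8gV29ybGQh'
#   >>> base64_decode("SGVsbG8gV29ybGQh")
#   b'Hello World!'
#   >>> base64_encode(b"ab")  # padded case: 16 bits -> "YWI" + "="
#   b'YWI='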
| 655 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
UpperCAmelCase_ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY)
def __magic_name__ ( ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Optional[Any] = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def __magic_name__ ( ) -> int:
"""simple docstring"""
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def __magic_name__ ( ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __magic_name__ ( ) -> List[str]:
"""simple docstring"""
lowercase_ : Optional[int] = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowercase_ : Any = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def __magic_name__ ( ) -> Any:
"""simple docstring"""
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def __magic_name__ ( ) -> Any:
"""simple docstring"""
lowercase_ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowercase_ : Tuple = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def __magic_name__ ( ) -> List[Any]:
"""simple docstring"""
assert med.median_filter(lowercase , 3 ).any()
def __magic_name__ ( ) -> str:
"""simple docstring"""
lowercase_ , lowercase_ : Optional[Any] = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def __magic_name__ ( ) -> Tuple:
"""simple docstring"""
lowercase_ : Optional[int] = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
"""simple docstring"""
lowercase_ : int = bs.Burkes(imread(lowercase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[Any]:
"""simple docstring"""
lowercase_ : int = rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __magic_name__ ( ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Tuple = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
lowercase_ : Optional[Any] = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
lowercase_ : List[str] = 0
lowercase_ : List[str] = 0
lowercase_ : List[str] = image[x_coordinate][y_coordinate]
lowercase_ : Union[str, Any] = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowercase_ : Union[str, Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowercase_ : int = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any() | 458 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
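
    # A minimal usage sketch for the pipeline above (the checkpoint name is
    # illustrative; "facebook/DiT-XL-2-256" is the commonly published one):
    #
    #   import torch
    #   from diffusers import DiTPipeline
    #
    #   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
    #   class_ids = pipe.get_label_ids(["white shark", "golden retriever"])  # label -> ImageNet id
    #   images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images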
| 655 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowerCAmelCase_ : int = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCAmelCase_ : Optional[Any] = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
    config, image_size = get_blip2_config(model_name)
lowerCAmelCase_ : Optional[Any] = InstructBlipForConditionalGeneration(A__ ).eval()
lowerCAmelCase_ : Tuple = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowerCAmelCase_ : List[Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase_ : Tuple = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : int = load_model_and_preprocess(
name=A__ , model_type=A__ , is_eval=A__ , device=A__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowerCAmelCase_ : str = original_model.state_dict()
lowerCAmelCase_ : List[Any] = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase_ : int = state_dict.pop(A__ )
if key.startswith("""Qformer.bert""" ):
lowerCAmelCase_ : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowerCAmelCase_ : Tuple = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowerCAmelCase_ : List[str] = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowerCAmelCase_ : List[str] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowerCAmelCase_ : Any = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowerCAmelCase_ : List[Any] = key.replace("""t5""" , """language""" )
lowerCAmelCase_ : int = val
# read in qv biases
read_in_q_v_bias(A__ , A__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A__ , strict=A__ )
lowerCAmelCase_ : Optional[Any] = load_demo_image()
lowerCAmelCase_ : List[Any] = """What is unusual about this image?"""
# create processor
lowerCAmelCase_ : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=A__ , image_std=A__ )
lowerCAmelCase_ : Tuple = InstructBlipProcessor(
image_processor=A__ , tokenizer=A__ , qformer_tokenizer=A__ , )
lowerCAmelCase_ : List[Any] = processor(images=A__ , text=A__ , return_tensors="""pt""" ).to(A__ )
# make sure processor creates exact same pixel values
lowerCAmelCase_ : Tuple = vis_processors["""eval"""](A__ ).unsqueeze(0 ).to(A__ )
lowerCAmelCase_ : Union[str, Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A__ )
original_model.to(A__ )
hf_model.to(A__ )
with torch.no_grad():
if "vicuna" in model_name:
lowerCAmelCase_ : int = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
lowerCAmelCase_ : List[Any] = hf_model(**A__ ).logits
else:
lowerCAmelCase_ : Union[str, Any] = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
lowerCAmelCase_ : Union[str, Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(A__ )
lowerCAmelCase_ : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
lowerCAmelCase_ : Union[str, Any] = hf_model(**A__ , labels=A__ ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCAmelCase_ : Dict = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , A__ , atol=A__ )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowerCAmelCase_ : Tuple = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowerCAmelCase_ : Optional[Any] = hf_model.generate(
**A__ , do_sample=A__ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCAmelCase_ : Dict = 2
print("""Original generation:""" , A__ )
lowerCAmelCase_ : int = processor.batch_decode(A__ , skip_special_tokens=A__ )
lowerCAmelCase_ : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , A__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A__ )
hf_model.save_pretrained(A__ )
if push_to_hub:
processor.push_to_hub(f'Salesforce/{model_name}' )
hf_model.push_to_hub(f'Salesforce/{model_name}' )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
__A : Dict = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__A : Tuple = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
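
# Example conversion run using the flags defined above (the script and output
# path names are illustrative):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub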
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
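
    # The behaviour under test, from the caller's side: `determine_framework`
    # honours an explicitly requested framework, then whatever a local
    # checkpoint contains, and prefers PyTorch when both frameworks are
    # installed. A sketch:
    #
    #   from transformers.onnx import FeaturesManager
    #
    #   framework = FeaturesManager.determine_framework("bert-base-uncased")  # "pt" if torch is available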
| 655 | 0 |
"""Dijkstra's two-stack algorithm for evaluating fully parenthesised arithmetic expressions."""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the single remaining operand is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
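
# Worked trace for "(5 + ((4 * 2) * (2 + 3)))"; each ")" applies the most
# recent operator to the top two operands:
#
#   (4 * 2) closes  -> push 8
#   (2 + 3) closes  -> push 5
#   (8 * 5) closes  -> push 40
#   (5 + 40) closes -> push 45
#
# Note the parser handles only single-digit operands and fully parenthesised
# input.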
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
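
# A short composition sketch for the configs above (the dimension values are
# illustrative):
#
#   vision_config = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
#   assert config.to_dict()["vision_config"]["patch_size"] == 16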
| 655 | 0 |
"""simple docstring"""
import numpy as np
UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class lowercase__ :
def __init__( self) -> Dict:
_lowerCamelCase : List[Any] = np.array(_lowercase)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase , _lowerCamelCase : Dict = np.where(letter == self.SQUARE)
_lowerCamelCase : Union[str, Any] = np.concatenate([indexa + 1, indexa + 1])
return indexes
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Any = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : List[Any] = message.lower()
_lowerCamelCase : Any = message.replace(""" """ , """""")
_lowerCamelCase : Dict = message.replace("""j""" , """i""")
_lowerCamelCase : Optional[Any] = np.empty((2, len(_lowercase)))
for letter_index in range(len(_lowercase)):
_lowerCamelCase : Dict = self.letter_to_numbers(message[letter_index])
_lowerCamelCase : List[str] = numbers[0]
_lowerCamelCase : Any = numbers[1]
_lowerCamelCase : str = first_step.reshape(2 * len(_lowercase))
_lowerCamelCase : Union[str, Any] = """"""
for numbers_index in range(len(_lowercase)):
_lowerCamelCase : str = int(second_step[numbers_index * 2])
_lowerCamelCase : Optional[Any] = int(second_step[(numbers_index * 2) + 1])
_lowerCamelCase : Union[str, Any] = self.numbers_to_letter(_lowercase , _lowercase)
_lowerCamelCase : Union[str, Any] = encoded_message + letter
return encoded_message
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : Union[str, Any] = message.lower()
message.replace(""" """ , """""")
_lowerCamelCase : Dict = np.empty(2 * len(_lowercase))
for letter_index in range(len(_lowercase)):
_lowerCamelCase : Optional[Any] = self.letter_to_numbers(message[letter_index])
_lowerCamelCase : Union[str, Any] = numbers[0]
_lowerCamelCase : Optional[int] = numbers[1]
_lowerCamelCase : List[str] = first_step.reshape((2, len(_lowercase)))
_lowerCamelCase : Optional[int] = """"""
for numbers_index in range(len(_lowercase)):
_lowerCamelCase : List[Any] = int(second_step[0, numbers_index])
_lowerCamelCase : Dict = int(second_step[1, numbers_index])
_lowerCamelCase : List[str] = self.numbers_to_letter(_lowercase , _lowercase)
_lowerCamelCase : List[Any] = decoded_message + letter
return decoded_message
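
# Round-trip sketch for the cipher above ("j" is folded into "i" and spaces
# are dropped during encoding, so the decoded text is the cleaned plaintext):
#
#   cipher = BifidCipher()
#   secret = cipher.encode("test message")
#   assert cipher.decode(secret) == "testmessage"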
| 88 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
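# A standalone sketch of the PyTorch <-> TensorFlow round trip the tests above
# assert; the "gpt2" checkpoint name is only an illustration and is not taken
# from this test file:
#
#     from transformers import AutoModelForCausalLM, TFAutoModelForCausalLM
#
#     tf_model = TFAutoModelForCausalLM.from_pretrained("gpt2", from_pt=True)
#     tf_model.save_pretrained("gpt2-tf")
#     pt_model = AutoModelForCausalLM.from_pretrained("gpt2-tf", from_tf=True)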
| 655 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
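# Worked example (kept as comments so importing this module stays side-effect
# free): for a sorted list with duplicates, bisect_left returns the first
# valid insertion point and bisect_right the one past the last duplicate.
#
#     >>> bisect_left([0, 5, 7, 10, 15], 5)
#     1
#     >>> bisect_right([0, 5, 7, 10, 15], 5)
#     2
#     >>> binary_search([0, 5, 7, 10, 15], 6) is None
#     True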
if __name__ == "__main__":
A_ : List[Any] = input('Enter numbers separated by comma:\n').strip()
A_ : Any = sorted(int(item) for item in user_input.split(','))
A_ : List[Any] = int(input('Enter a single number to be found in the list:\n'))
A_ : str = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''') | 57 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
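# Shape sketch for read_in_q_k_v above: with this model's hidden size of 256,
# each fused `in_proj_weight` has shape (768, 256) and is sliced row-wise into
# three (256, 256) projections -- rows 0:256 -> query, 256:512 -> key, and the
# last 256 rows -> value; the fused biases are split the same way.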
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations
A__ : Optional[Any] = '#'
class lowercase__ :
def __init__( self : List[Any] ):
lowerCamelCase_ : Optional[int] ={}
def UpperCAmelCase__ ( self : Dict , snake_case__ : str ):
lowerCamelCase_ : Optional[int] =self._trie
for char in text:
if char not in trie:
lowerCamelCase_ : Dict ={}
lowerCamelCase_ : str =trie[char]
lowerCamelCase_ : Tuple =True
def UpperCAmelCase__ ( self : Dict , snake_case__ : str ):
lowerCamelCase_ : int =self._trie
for char in prefix:
if char in trie:
lowerCamelCase_ : List[Any] =trie[char]
else:
return []
return self._elements(_lowercase )
def UpperCAmelCase__ ( self : Any , snake_case__ : dict ):
lowerCamelCase_ : Optional[Any] =[]
for c, v in d.items():
lowerCamelCase_ : Optional[Any] =[" "] if c == END else [(c + s) for s in self._elements(_lowercase )]
result.extend(_lowercase )
return tuple(_lowercase )
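# Illustration of the underlying dict-of-dicts: after inserting "dog" and
# "do", the trie is {'d': {'o': {'g': {'#': True}, '#': True}}}, where the
# END marker '#' flags a complete word.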
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 153 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 0 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 572 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
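# Worked example: with the default scale_factor of 8, a 768x768 request gives
# 768 // 8**2 = 12 with no remainder, so the function returns (12 * 8, 12 * 8)
# = (96, 96) -- the latent resolution handed to the UNet and MoVQ decoder.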
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f'''cuda:{gpu_id}''')

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 194 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration(self):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
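# Worked example for merge_dicts (a recursive merge of `source` into
# `destination`, mutating and returning the latter):
#
#     >>> merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#     {'a': {'c': 2, 'b': 1}, 'd': 3}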
def is_port_in_use(port=None):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0 | 237 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                "or `v_prediction` for the FlaxDDPMScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
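

# A minimal denoising-loop sketch (an addition, not part of the original
# module): the random "model output" below merely stands in for a real UNet
# prediction, and the snippet assumes a jax/flax install compatible with the
# scheduler above.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    rng = jax.random.PRNGKey(0)
    sample = jax.random.normal(rng, (1, 3, 8, 8))
    for t in state.timesteps:
        rng, step_key = jax.random.split(rng)
        model_output = jax.random.normal(step_key, sample.shape)
        sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)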
| 655 | 0 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
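# A minimal wiring sketch (the worker count and the "facebook/rag-token-nq"
# checkpoint are illustrative, not taken from this file):
#
#     import ray
#
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers, index_name="exact", use_dummy_dataset=True
#     )
#     retriever.init_retrieval()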
| 284 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        }, )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
    train = 'train'
    dev = 'dev'


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self :Dict ):
'''simple docstring'''
return len(self.features )
    def __getitem__( self, i ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
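# A minimal usage sketch (an addition; the data path and the tokenizer
# checkpoint are illustrative):
#
#     from transformers import AutoTokenizer
#
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     inputs = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors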
| 655 | 0 |
'''simple docstring'''


def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the current minimum and maximum."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
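# Worked example: [0, 5, 3, 2, 2] peels off (min, max) pairs into
# start=[0, 2] and end=[5, 3]; reversing end and joining with the leftover
# middle [2] yields [0, 2, 2, 3, 5].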
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 582 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
RESOURCE_FILES_NAMES = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        '''simple docstring'''
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
    def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
        '''simple docstring'''
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
    def save_vocabulary( self , save_directory , filename_prefix=None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
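# Hedged sketch (standalone; function and argument names here are invented for
# illustration): the pair layout produced by build_inputs_with_special_tokens above is
# [CLS] A [SEP] [SEP] B [SEP], and the token-type ids mark "[CLS] A" with 0 and the
# trailing "[SEP] [SEP] B [SEP]" with 1.
def _sketch_pair_layout(ids_a, ids_b, cls_id=0, sep_id=2):
    input_ids = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
    assert len(input_ids) == len(token_type_ids)
    return input_ids, token_type_ids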
| 655 | 0 |
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1E-12, max_iterations=100):
    """Power iteration: dominant eigenvalue/eigenvector of a symmetric or Hermitian matrix."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1J * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration() | 458 |
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n=100):
    """Project Euler 65: digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 655 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32( bytestream ):
    '''simple docstring'''
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(""">""" )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , """Please use tf.data to implement this functionality.""" )
def _extract_images( f ):
    '''simple docstring'''
    print("""Extracting""" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 20_51:
            raise ValueError(
                """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , """Please use tf.one_hot on tensors.""" )
def _dense_to_one_hot( labels_dense , num_classes ):
    '''simple docstring'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , """Please use tf.data to implement this functionality.""" )
def _extract_labels( f , one_hot=False , num_classes=10 ):
    '''simple docstring'''
    print("""Extracting""" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 20_49:
            raise ValueError(
                """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    """simple docstring"""
    @deprecated(
        None , """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""" , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
        if fake_data:
            self._num_examples = 1_00_00
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        return self._images
    @property
    def labels( self ):
        return self._labels
    @property
    def num_examples( self ):
        return self._num_examples
    @property
    def epochs_completed( self ):
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        if fake_data:
            fake_image = [1] * 7_84
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(A__ , """Please write your own downloading logic.""" )
def UpperCamelCase_ ( A__ : Optional[int] , A__ : List[Any] , A__ : Optional[Any] ):
'''simple docstring'''
if not gfile.Exists(A__ ):
gfile.MakeDirs(A__ )
lowerCAmelCase_ : Any = os.path.join(A__ , A__ )
if not gfile.Exists(A__ ):
urllib.request.urlretrieve(A__ , A__ ) # noqa: S310
with gfile.GFile(A__ ) as f:
lowerCAmelCase_ : Union[str, Any] = f.size()
print("""Successfully downloaded""" , A__ , A__ , """bytes.""" )
return filepath
@deprecated(
A__ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def UpperCamelCase_ ( A__ : int , A__ : Dict=False , A__ : Optional[Any]=False , A__ : str=dtypes.floataa , A__ : Any=True , A__ : Any=50_00 , A__ : Tuple=None , A__ : str=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , """rb""" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , """rb""" ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , """rb""" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , """rb""" ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            """Validation size should be between 0 and """
            f'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
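# Hedged usage sketch (the directory path is an assumption; names follow the original
# TensorFlow tutorial code this snippet derives from):
#   data = read_data_sets("/tmp/mnist", one_hot=True, validation_size=5000)
#   images, labels = data.train.next_batch(64)   # images: (64, 784), labels: (64, 10)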
| 275 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin ):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']
    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
    def _load_voice_preset( self , voice_preset=None , **kwargs ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset=None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=2_56 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
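# Hedged usage sketch (the checkpoint and preset names are assumptions, shown as
# comments because they need network access): a preset can be a loaded dict or a name
# resolved through self.speaker_embeddings, e.g.
#   processor = BarkProcessor.from_pretrained("suno/bark-small", "speaker_embeddings_path.json")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")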
| 655 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Optional[int] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
return text_encoding
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(_lowercase , return_tensors=_lowercase )
if text is not None:
__lowerCamelCase : Optional[Any] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
else:
__lowerCamelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(_lowercase )
return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
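# Hedged usage sketch (model id is an assumption): images go through BlipImageProcessor,
# text through the tokenizer, and both land in one BatchEncoding:
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")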
| 459 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , deriv=True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input("""Expected value: """))
    number_propagations = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
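# Illustrative run (shown as comments because the initial weight is random, so the
# exact output varies): with enough propagations the single-weight "network" output
# settles near `expected`.
#   random.seed(0)
#   round(forward_propagation(32, 450_000))   # typically ~32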
| 655 | 0 |
"""simple docstring"""
UpperCAmelCase = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
UpperCAmelCase = ["""a""", """b""", """c""", """d""", """e"""]
def _snake_case ( __snake_case : int , __snake_case : str , __snake_case : Any ):
"""simple docstring"""
_lowerCamelCase : int = start
# add current to visited
visited.append(__snake_case )
_lowerCamelCase : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCamelCase : int = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
_lowerCamelCase : int = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase = topological_sort("""a""", [], [])
print(sort)
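# Worked example (follows from the graph defined at the top of this file): DFS from "a"
# finishes "c" first, then "d", "e", "b", and finally "a", so the call above prints
# ['c', 'd', 'e', 'b', 'a'] - children always appear before their parents, i.e. the
# reversed list is a valid topological order.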
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig ):
    model_type = 'van'
    def __init__( self , image_size=2_24 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 1_28, 3_20, 5_12] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
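# Hedged usage sketch (values are just the defaults above, not an official checkpoint):
#   config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#   config.num_channels   # -> 3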
| 655 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
    """simple docstring"""
    def test_memory_implicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
    def test_memory_explicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arg1 ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs , arg1 = mock_training_loop_function('hello' )
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
        self.assertListEqual([bs, arg1] , [8, 'hello'] )
    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
    def test_approach_zero( self ):
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(1_2_8 , 'hello' , 'world' )
        self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
        self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('Oops, we had an error!' )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
    @require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory ) | 57 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
    def __call__( self , text_inputs , **kwargs ):
        '''simple docstring'''
        return super().__call__(text_inputs , **kwargs )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
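# Hedged usage sketch (the model id is an assumption; any causal LM works): the
# preprocess/_forward/postprocess chain above is what runs under the hood of
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)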
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal , daily_interest_rate , days_between_payments):
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods):
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal , nominal_annual_percentage_rate , number_of_years):
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
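# Worked check (illustrative numbers): $1,000 at a 0.05% daily rate for 30 days accrues
# 1000 * 0.0005 * 30 = 15.0 of simple interest.
assert abs(simple_interest(1000, 0.0005, 30) - 15.0) < 1e-9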
| 153 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(">" )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , "Please use tf.data to implement this functionality." )
def _extract_images(f):
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
lowercase__ = 1
return labels_one_hot
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class _DataSet:
    @deprecated(
        None , "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models." , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        '''simple docstring'''
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
        if fake_data:
            self._num_examples = 1_00_00
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        '''simple docstring'''
        return self._images
    @property
    def labels( self ):
        '''simple docstring'''
        return self._labels
    @property
    def num_examples( self ):
        '''simple docstring'''
        return self._num_examples
    @property
    def epochs_completed( self ):
        '''simple docstring'''
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 7_84
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            "Validation size should be between 0 and "
            f'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
| 655 | 0 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("*" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess )
    return results
if __name__ == "__main__":
    import doctest
    doctest.testmod()
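# Round-trip check (illustrative plaintext): decrypt inverts encrypt for any valid key.
assert decrypt(encrypt("WEAREDISCOVEREDFLEEATONCE", 3), 3) == "WEAREDISCOVEREDFLEEATONCE"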
| 572 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a linked list from the elements of the given sequence
    and returns the head of the list."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
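
# An iterative alternative to the recursive print_reverse above -- a sketch that avoids
# Python's recursion limit on very long lists by buffering data in an explicit stack:
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())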

def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 655 | 0 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_placeholder(self):
        # Intentional no-op kept from the source excerpt.
        pass
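
if __name__ == "__main__":
    # Hedged usage sketch outside the test harness: the public
    # "microsoft/layoutlm-base-uncased" checkpoint is assumed reachable (network needed).
    tok = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
    print(tok.tokenize("UNwant\u00E9d,running"))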
| 194 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin-style probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division; `d /= 2` would silently turn d into a float
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            composite = True
            for _ in range(exp):
                if b == n - 1:
                    composite = False
                    break
                b = b * b
                b %= n
            if composite:
                return False  # a is a witness that n is composite
        count += 1
    return True
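
# Minimal sketch of the imported `bin_exp_mod` helper, assuming it implements binary
# (square-and-multiply) modular exponentiation, i.e. the equivalent of pow(a, e, n);
# the real module's signature may differ:
def bin_exp_mod_sketch(a, e, n):
    result = 1
    a %= n
    while e > 0:
        if e & 1:  # fold in the current square when the exponent bit is set
            result = result * a % n
        a = a * a % n
        e >>= 1
    return result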

if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
SCREAMING_SNAKE_CASE_ = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class snake_case_ ( unittest.TestCase ,lowercase_ ):
def snake_case_ ( self ):
a_ : Dict = load_tool("text-question-answering" )
self.tool.setup()
a_ : Optional[int] = load_tool("text-question-answering" , remote=_lowercase )
def snake_case_ ( self ):
a_ : int = self.tool(_lowercase , "What did Hugging Face do in April 2021?" )
self.assertEqual(_lowercase , "launched the BigScience Research Workshop" )
def snake_case_ ( self ):
a_ : Any = self.remote_tool(_lowercase , "What did Hugging Face do in April 2021?" )
self.assertEqual(_lowercase , "launched the BigScience Research Workshop" )
def snake_case_ ( self ):
a_ : Optional[int] = self.tool(text=_lowercase , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_lowercase , "launched the BigScience Research Workshop" )
def snake_case_ ( self ):
a_ : List[str] = self.remote_tool(text=_lowercase , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_lowercase , "launched the BigScience Research Workshop" ) | 237 |
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
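
# Hedged sketch of how this mixin is typically consumed; the class and attribute names
# below are illustrative reconstructions, not taken from this file:
#
#     class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#         pipeline_class = IFPipeline
#
#         def get_dummy_components(self):
#             return self._get_dummy_components()
#
#         def test_save_load_local(self):
#             self._test_save_load_local()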
| 655 | 0 |
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
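
if __name__ == "__main__":
    # Hedged standalone usage sketch mirroring the slow test above (needs network access
    # to download the checkpoint; the output filename is illustrative):
    model_id = "google/ncsnpp-church-256"
    pipe = ScoreSdeVePipeline(
        unet=UNet2DModel.from_pretrained(model_id),
        scheduler=ScoreSdeVeScheduler.from_pretrained(model_id),
    )
    pipe.to(torch_device)
    pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0].save("sde_ve_sample.png")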
| 284 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 655 | 0 |