code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase: Dict = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = ['MobileNetV2FeatureExtractor']
_lowerCAmelCase: Optional[int] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: Dict = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowerCAmelCase: Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : TreeNode | None = None
SCREAMING_SNAKE_CASE__ : TreeNode | None = None
a : Optional[Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( __magic_name__ ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(__magic_name__ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__magic_name__ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__magic_name__ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_distrib(node.left )
UpperCAmelCase , UpperCAmelCase : Any = get_distrib(node.right )
UpperCAmelCase : Optional[Any] = 1 - left_distrib_excess
UpperCAmelCase : int = 1 - right_distrib_excess
UpperCAmelCase : List[Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(__magic_name__ )
+ abs(__magic_name__ )
)
UpperCAmelCase : List[Any] = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__magic_name__ , __magic_name__ )
return get_distrib(__magic_name__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=False ):
__magic_name__ : List[Any] =get_strided_contexts_and_ans(
lowerCamelCase , lowerCamelCase , doc_stride=lowerCamelCase , max_length=lowerCamelCase , assertion=lowerCamelCase , )
return example
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with jsonlines.open(lowerCamelCase , """a""" ) as writer:
for example in tqdm(lowerCamelCase , total=len(lowerCamelCase ) , desc="""Saving samples ... """ ):
__magic_name__ : int =example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a : Any = {
"allenai/led-base-16384": 1_63_84,
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) )
UpperCAmelCase : Any = add_prefix_space
UpperCAmelCase : str = pre_tok_class(**snake_case )
UpperCAmelCase : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase : Dict = "post_processor"
UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase : int = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase : Union[str, Any] = tuple(state["cls"] )
UpperCAmelCase : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
UpperCAmelCase : Tuple = trim_offsets
UpperCAmelCase : List[str] = True
if changes_to_apply:
UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) )
UpperCAmelCase : Tuple = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def A_ ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
UpperCAmelCase : Optional[Any] = value
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case , **snake_case )
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case , **snake_case )
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def A_ ( self , snake_case , snake_case=None ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : int = super()._pad(
encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
if needs_to_be_padded:
UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase : List[str] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase : Any = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 679 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict=13 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[Any]=99 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=5_12 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : str="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=0 , ) -> List[Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_lengths
_a = use_token_type_ids
_a = use_labels
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = vocab_size
_a = n_special
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = summary_type
_a = use_proj
_a = scope
_a = bos_token_id
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 ).float()
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , ) -> str:
"""simple docstring"""
_a = XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , ) -> int:
"""simple docstring"""
_a = XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
_a = XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
_a = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , ) -> Tuple:
"""simple docstring"""
_a = XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
_a = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , ) -> Dict:
"""simple docstring"""
_a = XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , ) -> Optional[Any]:
"""simple docstring"""
_a = self.num_labels
_a = XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , ) -> Any:
"""simple docstring"""
_a = self.num_choices
_a = XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class A(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common/pipeline test suite for XLM (upstream name: ``XLMModelTest``).

    NOTE(review): the obfuscated original listed the bases as ``_a, _a, _a``
    (undefined and duplicated — a TypeError), named every class attribute
    ``lowercase_`` and every method ``__lowerCAmelCase`` (so each definition
    shadowed the previous one and none matched unittest's ``test*`` discovery
    prefix). Bases, attribute names and method names are restored from the
    upstream transformers test file — confirm the mixins and ``XLMConfig``
    are imported at the top of this file.
    """

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline test combinations known to fail."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add QA start/end position labels that the common harness cannot infer."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        # NOTE(review): upstream passes config_class=XLMConfig here — confirm the import.
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """XLM appends a PAD dummy token per step, so shapes grow by idx + 1."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Hidden-state counterpart of `_check_attentions_for_generate`."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class A(unittest.TestCase):
    """Slow integration test: greedy generation with the ``xlm-mlm-en-2048`` checkpoint.

    NOTE(review): this class shadows the test class of the same (obfuscated)
    name defined above; upstream names them ``XLMModelTest`` and
    ``XLMModelLanguageGenerationTest``. The obfuscated body assigned every
    value to ``_a`` while reading ``model``/``output_ids`` (NameErrors);
    variable names restored, and the method renamed so unittest discovers it.
    """

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 22 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Return a unique file path ending in *suffix*, inside a freshly created temp dir.

    NOTE(review): restored from the obfuscated ``lowercase(__magic_name__)``,
    which joined the suffix as a directory, read an undefined ``suffix`` name,
    and called the non-existent ``uuid.uuida`` (clearly ``uuid.uuid4``). The
    name and keyword parameter are grounded by the call
    ``get_new_path(suffix=".wav")`` later in this file.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Round-trip tests for ``AgentAudio`` (upstream name: ``AgentAudioTests``).

    NOTE(review): both methods were obfuscated to the same name ``A_`` — the
    first was shadowed and neither matched unittest's ``test*`` discovery
    prefix, so nothing ever ran. Names and locals restored per upstream;
    ``torch.floataa`` read back as ``torch.float64`` — confirm.
    """

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Tests for ``AgentImage`` (upstream name: ``AgentImageTests``).

    NOTE(review): all three methods were obfuscated to ``A_`` (shadowed and
    never discovered by unittest) and every local was obfuscated to
    ``snake_case``; names restored per upstream.
    """

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        # Constructed from a path, so to_string() points back at the same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        # Constructed from a PIL image, so a fresh serialized path is created.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class UpperCamelCase__(unittest.TestCase):
    """Tests for ``AgentText`` (upstream name: ``AgentTextTests``).

    NOTE(review): method renamed from the obfuscated ``A_`` so unittest
    discovers it; the final assertion compared ``snake_case`` to itself —
    restored to compare the string against the agent type (which upstream
    subclasses ``str``).
    """

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 679 | 0 |
import sys
import turtle


def get_mid(p1, p2):
    """Return the midpoint of two (x, y) points.

    NOTE(review): the obfuscated original had duplicate parameter names
    (a SyntaxError) and averaged ``p1`` with itself; restored so each
    coordinate is the mean of the two endpoints. Function names restored to
    match the calls in ``triangle`` and the ``__main__`` block.
    """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Draw one triangle, then recurse into the three corner sub-triangles
    (Sierpinski fractal) until *depth* reaches 0."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 23 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF2 "token dropping" BERT checkpoint into a PyTorch ``BertForMaskedLM``.

    NOTE(review): the obfuscated original had three identical parameter names
    (a SyntaxError) and discarded every loaded weight into ``UpperCAmelCase``.
    Assignment targets are reconstructed from the ``.shape`` arguments each
    loader call already passed (e.g. ``self_attn.query.weight.data.shape``)
    and the upstream conversion script; the function name matches the call in
    the ``__main__`` block below.

    Args:
        tf_checkpoint_path: Path to the TF2 checkpoint to read variables from.
        config_path: JSON file with the ``BertConfig`` of the target model.
        pytorch_dump_path: Directory the converted model is saved to.
    """

    def get_masked_lm_array(name):
        # Loads a variable from the `masked_lm` scope; kernels are stored transposed.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name):
        # Loads a variable from the top-level `encoder` scope.
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        # Loads a per-layer variable from `encoder/_transformer_layers/<i>/`.
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, orginal_shape):
        # Attention weights are stored with split heads; reshape to the PyTorch shape.
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # CLI entry point for the TF2 "token dropping" BERT checkpoint conversion.
    # NOTE(review): the obfuscated original assigned the parser and parsed args
    # to a constant named `a` while reading `parser`/`args` (NameErrors);
    # names restored to match the usage below.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both module constants were obfuscated to the same name
# `UpperCAmelCase_` (the second shadowed the first); restored to the
# upstream names — confirm.
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowerCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ResNet model (upstream name: ``ResNetConfig``).

    NOTE(review): the obfuscated original inherited from ``__lowerCAmelCase``
    twice (undefined and duplicated), had identical ``__init__`` parameter
    names (a SyntaxError), and assigned every value to a local instead of
    ``self`` — so the config carried no attributes at all. Bases restored from
    this file's own imports (``PretrainedConfig``, ``BackboneConfigMixin``);
    attribute names restored from upstream.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        # Resolve which stages are exposed when the model is used as a backbone.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for ResNet (upstream name: ``ResNetOnnxConfig``).

    NOTE(review): the obfuscated original inherited from the undefined
    ``__lowerCAmelCase`` (restored to ``OnnxConfig``, imported at the top of
    this file) and named both properties identically so the second shadowed
    the first; names restored per the ``OnnxConfig`` API.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 1e-3
| 24 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def find_backend(line):
    """Return the backend key (e.g. ``"torch"`` or ``"torch_and_vision"``)
    guarded by an ``if not is_xxx_available()`` line, or None.

    NOTE(review): renamed from the obfuscated ``lowercase`` — this file calls
    ``find_backend`` from ``parse_init``.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse one ``__init__.py`` and return two dicts mapping each backend to
    the objects it defines: one from ``_import_structure`` and one from the
    ``TYPE_CHECKING`` section — or None for a traditional (non-lazy) init.

    NOTE(review): renamed from the obfuscated ``lowercase`` (called as
    ``parse_init`` below); locals restored from the names the original body
    read (``lines``, ``line_index``, ``objects``, ...).
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init and return a list of error strings.

    NOTE(review): the obfuscated original had two identical parameter names
    (a SyntaxError); renamed per the call site ``analyze_results(*objects)``
    below and the names the body already read.
    """

    def find_duplicates(seq):
        # Objects listed more than once in the same backend section.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk ``PATH_TO_TRANSFORMERS`` and raise if any ``__init__.py`` defines
    different objects in ``_import_structure`` vs the TYPE_CHECKING section.

    NOTE(review): renamed from the obfuscated ``lowercase`` (called as
    ``check_all_inits`` in the ``__main__`` block); the walk target and
    locals restored from the names the body read.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of direct submodules of the ``transformers`` package.

    NOTE(review): renamed from the obfuscated ``lowercase`` (called as
    ``get_transformers_submodules`` in ``check_submodules``); the walk target
    is ``PATH_TO_TRANSFORMERS`` and locals are restored from the names the
    body read.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (no dots once converted to module path).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules intentionally absent from the main `_import_structure`.
# NOTE(review): restored from the obfuscated constant `a` — the name
# `IGNORE_SUBMODULES` is read by `check_submodules` below.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Raise if a submodule is missing from the main init's ``_import_structure``.

    NOTE(review): renamed from the obfuscated ``lowercase`` (called as
    ``check_submodules`` in the ``__main__`` block); locals restored from
    the names the body read (``spec``, ``transformers``, ...).
    """
    # Load the real `transformers` package from source, not the installed one.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Validate every __init__.py and the submodule registry when run as a script.
    check_all_inits()
    check_submodules()
| 679 | 0 |
import math
import sys
def lowerCamelCase__(_a):
    """Return the minimum number of perfect squares that sum to *_a*
    (Lagrange's four-square theorem guarantees the answer is at most 4).

    Raises ValueError for non-natural input; returns 1 for 0 (kept from the
    original behavior). Uses bottom-up dynamic programming over `answers`.

    NOTE(review): the obfuscated original discarded every value into
    ``SCREAMING_SNAKE_CASE`` while reading ``number``/``answers``
    (NameErrors), and took ``sqrt`` of the whole input instead of the
    current subproblem ``i`` — which indexed ``answers`` with negative
    offsets and produced wrong results. Both fixed.
    """
    number = _a
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        # bug fix: bound j by sqrt(i), not sqrt(number), so i - j**2 >= 0
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
def solution():
    """Project Euler maximum path sum: read ``triangle.txt`` next to this
    script and return the largest top-to-bottom sum of adjacent numbers.

    NOTE(review): renamed from the obfuscated ``lowercase`` — the ``__main__``
    block below calls ``solution()``. The original also passed an undefined
    ``__magic_name__`` to ``os.path.realpath`` (restored to ``__file__``) and
    discarded every parsed value into ``UpperCAmelCase``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as in_file:
        triangle = in_file.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Propagate the best reachable sum down row by row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
    # Print the maximum top-to-bottom path sum for the bundled triangle.txt.
    print(solution())
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
# NOTE(review): the type alias and both grids were obfuscated to the same
# name `__UpperCamelCase`; restored from the names the functions annotate
# with (`Matrix`) and the `__main__` block iterates (`initial_grid`,
# `no_solution`).
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (contains contradictions, e.g. two 5s in row 0)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid, row, column, n):
    """Return True when digit ``n`` may be placed at (row, column).

    Checks the full row, the full column and the 3x3 sub-box containing
    the cell for an existing occurrence of ``n``.
    """
    for i in range(9):
        # Row or column clash.
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            # 3x3 sub-box clash; (row - row % 3, column - column % 3) is the
            # top-left corner of the box containing the cell.
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid):
    """Return the (row, column) of the first empty cell (value 0), scanning
    in row-major order, or None when the grid has no empty cell left."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid):
    """Solve ``grid`` in place by recursive backtracking.

    Returns the solved grid (the same list object, mutated) or None when no
    solution exists. Note the tuple returned by ``find_empty_location`` is
    always truthy, even for cell (0, 0).
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement and try the next digit (backtrack).
            grid[row][column] = 0

    return None
def print_solution(grid):
    """Pretty-print ``grid``: one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # NOTE(review): ``sudoku`` mutates its argument in place, so the grid
    # printed before solving is the same object that gets solved — deep-copy
    # first if the pristine grid is needed afterwards.
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 26 |
'''simple docstring'''
def fibonacci(n):
    """Return the n-th number of the sequence 0, 1, 1, 2, 3, 5, ... (1-indexed
    with fibonacci(1) == 0).

    Non-int input returns 0, preserving the original guard's behaviour.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number that has ``n`` digits.

    Walks the sequence from index 2 upward, recomputing the digit count at
    each step until it reaches ``n``.
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n=1000):
    """Project Euler 25: index of the first Fibonacci number with ``n`` digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Read the target digit count from stdin and print the resulting index.
    print(solution(int(str(input()).strip())))
| 679 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        """Canny-edge conditioning reproduces a known image slice."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )

        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng, jax.device_count() )

        # Replicate params and shard inputs across devices for pmapped inference.
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(F"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def test_pose(self):
        """OpenPose conditioning reproduces a known image slice."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )

        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng, jax.device_count() )

        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(F"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 27 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer layers for each published RWKV-4 checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden (embedding) dimension for each checkpoint size. The name keeps the
# original "HIDEN" spelling because the conversion function references it as such.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the HF `transformers` layout.

    Args:
        state_dict: mapping of original RWKV parameter names to tensors.

    Returns:
        The same mapping, re-keyed to the `RwkvModel` naming scheme.
    """
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_key and reshape
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )

        # Every parameter except the LM head lives under the "rwkv." prefix.
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    """Convert an original RWKV checkpoint from the Hub into HF `transformers` format.

    Args:
        repo_id: Hub repo holding the original checkpoint.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: directory where the converted model is written.
        size: model size key ("169M", ..., "14B"); inferred from the file name when None.
        tokenizer_file: optional tokenizer file; defaults to the GPT-NeoX-20B tokenizer.
        push_to_hub: when True, also upload the converted model under ``model_name``.
        model_name: Hub id (user/name) used when pushing.

    Raises:
        ValueError: when the size cannot be inferred / is unknown, or when
            pushing without a ``model_name``.
    """
    # 1. Build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )

    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location="cpu" )
    state_dict = convert_state_dict(state_dict )

    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , "w" , encoding="utf-8" ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
            f.write(content )

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
    shard_files = list(shards.keys() )

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="2GB" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    # CLI entry point for the conversion; mirrors the function's parameters.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework used by `return_tensors` in these tests: prefer torch,
# then tensorflow, falling back to jax.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class _a ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for ByT5, a byte-level tokenizer without a fixed vocabulary."""

    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Create a fresh tokenizer and persist it for the common test machinery."""
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def ta_base_tokenizer(self):
        # Reference tokenizer used by the behavioural tests below.
        return ByTaTokenizer.from_pretrained('google/byt5-small' )

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer written by ``setUp`` with optional overrides."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """Return a (text, ids) pair built only from cleanly decodable ASCII tokens."""
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$', t[1] ), toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=False ), toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False )
        return output_txt, output_ids

    def test_eos_treatment(self):
        # A user-typed "</s>" must collapse to the same ids as the auto-appended EOS.
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'] )

    def test_multibyte_chars(self):
        """Round-trip encoding/decoding of multi-byte UTF-8 characters."""
        tokenizer = self.ta_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids )

        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, 'Unicode €.</s>' )

        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, 'e è é ê ë</s>' )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), 'e è é ê ë</s>' )

    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        self.assertIsInstance(batch, BatchEncoding )

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )

        self.assertListEqual(expected_src_tokens, result )

        self.assertEqual((2, 37), batch.input_ids.shape )
        self.assertEqual((2, 37), batch.attention_mask.shape )

    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch )
        self.assertIn('attention_mask', batch )
        self.assertNotIn('decoder_input_ids', batch )
        self.assertNotIn('decoder_attention_mask', batch )

    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK )
        self.assertEqual(32, targets['input_ids'].shape[1] )

    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text )
        self.assertEqual(expected_src_tokens, batch['input_ids'][0] )
        self.assertEqual(expected_tgt_tokens, batch['labels'][0] )

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length, 42 )

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False )
                self.assertListEqual(before_tokens, after_tokens )

                shutil.rmtree(tmpdirname )

        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False )
                self.assertListEqual(before_tokens, after_tokens )
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length, 42 )

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length, 43 )

                shutil.rmtree(tmpdirname )

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )

                with open(os.path.join(tmp_dir, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )

                with open(os.path.join(tmp_dir, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )

                added_tokens_extra_ids = [F"<extra_id_{i}>" for i in range(125 )]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map, outfile )
                with open(os.path.join(tmp_dir, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config, outfile )

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )

                tokenizer = tokenizer_class.from_pretrained(tmp_dir )
                # A lone continuation byte is not valid UTF-8 and must decode to "".
                self.assertTrue(tokenizer.decode([255] ) == '' )

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # ByT5 only accepts one-character strings and special added tokens as tokens.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string, str )

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False )

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None )
                    self.assertEqual(getattr(tokenizer, attr ), None )
                    self.assertEqual(getattr(tokenizer, attr + '_id' ), None )

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer, attr ), token_to_test_setters )
                    self.assertEqual(getattr(tokenizer, attr + '_id' ), token_id_to_test_setters )

                setattr(tokenizer, 'additional_special_tokens_ids', [] )
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens' ), [] )
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids' ), [] )

                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens' ), [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids' ), [token_id_to_test_setters] )
| 28 |
'''simple docstring'''
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase : Optional[Any] = str(bin(__magic_name__ ) )[2:] # remove the leading "0b"
UpperCAmelCase : List[Any] = str(bin(__magic_name__ ) )[2:] # remove the leading "0b"
UpperCAmelCase : Dict = max(len(__magic_name__ ) , len(__magic_name__ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(__magic_name__ ) , b_binary.zfill(__magic_name__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical XLM checkpoint names to their hosted config files.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
    """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
    """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
    """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
    """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
    """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
    """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
    """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig(PretrainedConfig):
    """Configuration class for XLM models; defaults mirror ``xlm-mlm-en-2048``."""

    model_type = 'xlm'
    # Map standard config attribute names onto XLM's historical field names.
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        """Build the config; see the `transformers` XLM documentation for parameter semantics."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs['''n_words''']

        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM models."""

    @property
    def inputs(self):
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 29 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework used by `return_tensors` in these tests: prefer torch,
# then tensorflow, falling back to jax.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Tokenizer test-suite for PerceiverTokenizer (a byte/UTF-8-level tokenizer).

    NOTE(review): throughout this class values are bound to placeholder names
    (``UpperCAmelCase``) but later read under different names (``tokenizer``,
    ``encoded``, ``toks`` ...). This looks like an automated-renaming artifact
    and will raise NameError at runtime — confirm against the upstream test file.
    """
    # Tokenizer class under test; second flag disables the fast (Rust) tokenizer path.
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def A_ ( self ):
        """setUp: create a fresh PerceiverTokenizer and persist it to the temp dir."""
        super().setUp()
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A_ ( self ):
        """Pretrained tokenizer loaded from the deepmind/language-perceiver checkpoint."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def A_ ( self , **snake_case ):
        """Reload the tokenizer saved during setUp, forwarding any kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        """Build a clean, re-encodable text/ids pair from individually decodable ids.

        Collects ids whose single-id decode is plain ASCII and round-trips
        (encode(decode([i])) == [i]), clamps the list between min/max length,
        and returns the decoded text plus its encoding.
        """
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                # Byte i does not decode on its own — skip it.
                pass
            toks.append((i, tok) )
        # Keep only tokens whose text is ASCII letters/spaces and that round-trip to a single id.
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            # Pad up to min_length by repeating the token list.
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            # Force a space boundary between the first token and the rest.
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids
    def A_ ( self ):
        """Multibyte characters (€, accented letters) encode/decode losslessly with [CLS]/[SEP]."""
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def A_ ( self ):
        """Batched call pads to the longest sequence and returns framework tensors."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def A_ ( self ):
        """Encoder-only tokenizer: batch has input_ids/attention_mask, no decoder fields."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )
    def A_ ( self ):
        """text_target with max_length padding/truncation yields fixed-width targets."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )
    def A_ ( self ):
        """Save/reload round-trips: encodings, added special tokens, and model_max_length survive."""
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                # model_max_length can still be overridden at load time.
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )
    def A_ ( self ):
        """additional_special_tokens in the saved JSON config files are honored and overridable."""
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                # Perceiver reserves 125 <extra_id_*> sentinel tokens.
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def A_ ( self ):
        """Decoding a lone continuation byte yields the Unicode replacement character."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
    def A_ ( self ):
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def A_ ( self ):
        """convert_tokens_to_string joins special and byte tokens back into a str."""
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return "".join(sorted(_lowercase ) )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return word_by_signature[signature(_lowercase )]
__a = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a = sorted({word.strip().lower() for word in data.splitlines()})
__a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 30 |
"""EfficientFormer model configuration."""
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of known checkpoints to their hosted config files.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for an EfficientFormer model.

    Defaults mirror the snap-research/efficientformer-l1-300 architecture.
    The original block declared every parameter with the same name
    (``snake_case``), which is a SyntaxError, and never stored them on
    ``self``; parameter names are restored from the attribute assignments.
    """

    # NOTE(review): presumably this is the standard `model_type` class attribute
    # expected by PretrainedConfig — confirm against the rest of the package.
    SCREAMING_SNAKE_CASE__ : Tuple = "efficientformer"

    def __init__(
        self,
        depths=[3, 2, 6, 4],
        hidden_sizes=[48, 96, 224, 448],
        downsamples=[True, True, True, True],
        dim=448,
        key_dim=32,
        attention_ratio=4,
        resolution=7,
        num_hidden_layers=5,
        num_attention_heads=8,
        mlp_expansion_ratio=4,
        hidden_dropout_prob=0.0,
        patch_size=16,
        num_channels=3,
        pool_size=3,
        downsample_patch_size=3,
        downsample_stride=2,
        downsample_pad=1,
        drop_path_rate=0.0,
        num_metaad_blocks=1,
        distillation=True,
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        batch_norm_eps=1e-05,
        **kwargs,
    ):
        """Store all architecture hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


# NLTK changed METEOR's input contract over time (pre-tokenized input from 3.6.5),
# so the installed version drives both resource download and scoring below.
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize

_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    """METEOR metric: unigram-matching based machine-translation evaluation.

    The original block defined all three hooks under one duplicated method
    name, so ``datasets.Metric`` never saw ``_info`` / ``_download_and_prepare``
    / ``_compute``; the canonical hook names are restored here.
    """

    def _info(self):
        """Declare the metric's metadata and expected string features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],
            reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Fetch the NLTK resources METEOR needs (wordnet, plus tokenizer data on newer NLTK)."""
        import nltk

        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Return the mean METEOR score over all (reference, prediction) pairs."""
        if NLTK_VERSION >= version.Version('3.6.5'):
            # Newer NLTK requires pre-tokenized input.
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and runs shape checks for the TF model tests.

    Restored from an obfuscated block whose ``__init__`` repeated one parameter
    name (SyntaxError) and whose five methods shared the name ``A_``; the names
    below match how the sibling test class calls this helper
    (``TFResNetModelTester(self)``, ``prepare_config_and_inputs`` ...).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One ResNet stage per entry in `depths`.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a ResNetConfig from the tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model output shape check."""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head logits shape check."""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test (config, inputs_dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common model tests for TF ResNet (TFResNetModel / TFResNetForImageClassification).

    NOTE(review): every method is named ``A_`` (later defs shadow earlier ones,
    and unittest never discovers ``setUp``/``test_*``) and locals are bound to
    placeholder names but read under others — looks like an automated-renaming
    artifact; confirm against the upstream test file.
    """
    # Models and pipeline mapping under test; empty when TF is unavailable.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # Feature flags for the common test mixin (resize embeddings, head masking, ...).
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
    def A_ ( self ):
        """setUp: build the model tester and the config tester."""
        UpperCAmelCase : Dict = TFResNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
    def A_ ( self ):
        """Run the full battery of common config sanity checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def A_ ( self ):
        """No extra common config properties to test for ResNet."""
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds" )
    def A_ ( self ):
        """Skipped: vision model has no inputs_embeds."""
        pass
    @unittest.skip(reason="ResNet does not support input and output embeddings" )
    def A_ ( self ):
        """Skipped: vision model has no input/output embeddings."""
        pass
    def A_ ( self ):
        """call() signature starts with `pixel_values`."""
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : Dict = model_class(snake_case )
            UpperCAmelCase : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )
    def A_ ( self ):
        """Base-model forward/shape check via the model tester."""
        UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def A_ ( self ):
        """hidden_states has num_stages + 1 entries with the expected spatial shape."""
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase : List[str] = self.model_tester.num_stages
            self.assertEqual(len(snake_case ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        # Exercise both residual layer variants.
        UpperCAmelCase : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase : str = layer_type
                UpperCAmelCase : Optional[Any] = True
                check_hidden_states_output(snake_case , snake_case , snake_case )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase : str = True
                check_hidden_states_output(snake_case , snake_case , snake_case )
    def A_ ( self ):
        """Classification-head forward/shape check via the model tester."""
        UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case )
    @slow
    def A_ ( self ):
        """Loading the first pretrained TF ResNet checkpoint succeeds."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Named `prepare_img` to match its call site in the integration test below;
    the obfuscated original bound the image to a throwaway name and returned
    an undefined variable.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained TF ResNet checkpoint.

    Restored from an obfuscated block where the property and the test shared
    one method name (shadowing) and locals were read under undefined names.
    """

    @cached_property
    def default_image_processor(self):
        # Only usable when the vision extras (PIL etc.) are installed.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """End-to-end logits check on the COCO cats image."""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Unit tests for generation's DisjunctiveConstraint.

    Restored from an obfuscated block in which all four tests shared one
    method name (only the last was discoverable) and constraint lists were
    read under an undefined placeholder name.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        # Tensors (or lists of tensors) are rejected — integers only.
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        # After a reset, progress starts over and `remaining()` counts back down.
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=5 , snake_case=4 , snake_case=6_4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : List[Any] = scope
def A_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : str = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MPNetModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case , snake_case )
UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_mpnet_for_question_answering(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run MPNetForQuestionAnswering and check start/end logit shapes."""
    # NOTE(review): signature reconstructed — the obfuscated source repeated
    # the parameter name `snake_case` six times (a SyntaxError).
    model = MPNetForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_mpnet_for_sequence_classification(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run MPNetForSequenceClassification and check the logits shape."""
    # NOTE(review): signature reconstructed from obfuscated duplicate params.
    config.num_labels = self.num_labels
    model = MPNetForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_mpnet_for_multiple_choice(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run MPNetForMultipleChoice and check the logits shape.

    Inputs are tiled along a new axis so each example carries ``num_choices``
    candidate sequences.
    """
    # NOTE(review): signature reconstructed from obfuscated duplicate params.
    config.num_choices = self.num_choices
    model = MPNetForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_mpnet_for_token_classification(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run MPNetForTokenClassification and check the per-token logits shape."""
    # NOTE(review): signature reconstructed from obfuscated duplicate params.
    config.num_labels = self.num_labels
    model = MPNetForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the shape the common test mixin expects."""
    # NOTE(review): renamed from the obfuscated duplicate `A_`; the common
    # ModelTesterMixin looks this method up by name — confirm against it.
    config_and_inputs = self.prepare_config_and_inputs()
    (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class MPNetModelTest(lowercase__, lowercase__, unittest.TestCase):
    """Common unit tests for the MPNet model heads.

    NOTE(review): reconstructed from obfuscated source in which the class name
    duplicated the integration-test class below and every method/attribute
    shared one placeholder name, so unittest would have discovered nothing
    (test methods must be named ``test*``). The base classes are kept as in the
    source; they presumably correspond to ModelTesterMixin / PipelineTesterMixin
    — confirm against the file's imports. The mixin attribute names
    (`all_model_classes`, `pipeline_model_mapping`, `test_pruning`,
    `test_resize_embeddings`) follow the standard tester contract — confirm.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        # Fix: the obfuscated source passed the dangling name `snake_case` as
        # config_class; the tester builds MPNetConfig instances.
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained microsoft/mpnet-base weights."""

    # NOTE(review): class renamed — the obfuscated source reused the same
    # class name as the unit-test class above, so one of them was shadowed;
    # the test method is renamed `test_*` so unittest actually runs it.
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __magic_name__ (unittest.TestCase ):
    """Tests for AutoTokenizer resolution, registration, and remote-code loading.

    NOTE(review): this block comes from machine-obfuscated source. Every test
    method shares the name `SCREAMING_SNAKE_CASE__` (so only the last
    definition survives, and none would be discovered by unittest, which
    requires `test*` names), and most call arguments were replaced by the
    placeholder `_a`, whose original values (model identifiers, expected
    classes, temp dirs, booleans) are NOT recoverable from this file alone.
    Code is left byte-identical; restore names/arguments from the upstream
    test module before relying on it.
    """

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # setUp in the upstream test: resets a counter-like attribute.
        snake_case__ = 0

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Every canonical BERT / GPT-2 checkpoint should resolve via AutoTokenizer.
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            snake_case__ = AutoTokenizer.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(_a ) , 0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            snake_case__ = AutoTokenizer.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(_a ) , 0 )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Tiny local BERT checkpoint: 12-entry vocab.
        snake_case__ = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Tiny local RoBERTa checkpoint: 20-entry vocab.
        snake_case__ = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        snake_case__ = AutoConfig.from_pretrained(_a )
        self.assertIsInstance(_a , _a )
        # Check that tokenizer_type ≠ model_type
        snake_case__ = AutoTokenizer.from_pretrained(_a , config=_a )
        self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Loading from bare vocab files via the explicit `tokenizer_type` argument.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) )

            snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' , use_fast=_a )
            self.assertIsInstance(_a , _a )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) )

            snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' , use_fast=_a )
            self.assertIsInstance(_a , _a )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Same as above but without forcing use_fast.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) )

            snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' )
            self.assertIsInstance(_a , _a )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) )

            snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Unknown tokenizer_type must raise.
        with pytest.raises(_a ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            snake_case__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )

            if isinstance(_a , _a ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a )
            else:
                self.assertEqual(tokenizer.do_lower_case , _a )

            self.assertEqual(tokenizer.model_max_length , 5_12 )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Nonexistent repo id must raise with a helpful message.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                _a , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                snake_case__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        snake_case__ = TOKENIZER_MAPPING.values()
        snake_case__ = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(_a )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_a ) , _a )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _a )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_a )
        snake_case__ = '''Hello, world. How are you?'''
        snake_case__ = tokenizer.tokenize(_a )
        self.assertEqual('''[UNK]''' , tokens[0] )

        snake_case__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_a )
        snake_case__ = tokenizer.tokenize(_a )
        self.assertEqual('''[UNK]''' , tokens[0] )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # A bare PreTrainedTokenizerFast loaded from a tokenizer.json-only repo.
        snake_case__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(_a ) , _a )
        self.assertEqual(tokenizer.model_max_length , 5_12 )
        self.assertEqual(tokenizer.vocab_size , 3_00_00 )
        self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
        self.assertEqual(tokenizer.padding_side , '''right''' )
        self.assertEqual(tokenizer.truncation_side , '''right''' )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        snake_case__ = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            snake_case__ = AutoTokenizer.from_pretrained(_a )

        self.assertIsInstance(_a , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        snake_case__ = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Check we can load the tokenizer config of an online model.
        snake_case__ = get_tokenizer_config('''bert-base-cased''' )
        snake_case__ = config.pop('''_commit_hash''' , _a )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(_a , {'''do_lower_case''': False} )

        # This model does not have a tokenizer_config so we get back an empty dict.
        snake_case__ = get_tokenizer_config(_a )
        self.assertDictEqual(_a , {} )

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        snake_case__ = AutoTokenizer.from_pretrained(_a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            snake_case__ = get_tokenizer_config(_a )

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Custom (slow) tokenizer registration round-trip.
        try:
            AutoConfig.register('''custom''' , _a )
            AutoTokenizer.register(_a , slow_tokenizer_class=_a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoTokenizer.register(_a , slow_tokenizer_class=_a )

            snake_case__ = CustomTokenizer.from_pretrained(_a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )

            snake_case__ = AutoTokenizer.from_pretrained(_a )
            self.assertIsInstance(_a , _a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Custom slow+fast tokenizer registration, in two steps and in one step.
        try:
            AutoConfig.register('''custom''' , _a )

            # Can register in two steps
            AutoTokenizer.register(_a , slow_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(_a , fast_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                _a , slow_tokenizer_class=_a , fast_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoTokenizer.register(_a , fast_tokenizer_class=_a )

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                snake_case__ = BertTokenizerFast.from_pretrained(_a )
                bert_tokenizer.save_pretrained(_a )
                snake_case__ = CustomTokenizerFast.from_pretrained(_a )

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )

            snake_case__ = AutoTokenizer.from_pretrained(_a )
            self.assertIsInstance(_a , _a )
            snake_case__ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
            self.assertIsInstance(_a , _a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_a ):
            snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_a ):
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )

        snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            snake_case__ = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )
                snake_case__ = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )

    @require_tokenizers
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Local custom classes must win over remote code unless trust_remote_code is set.
        class __magic_name__ (snake_case_ ):
            '''simple docstring'''
            __lowercase : Tuple = False

        class __magic_name__ (snake_case_ ):
            '''simple docstring'''
            __lowercase : List[str] = NewTokenizer
            __lowercase : List[str] = False

        try:
            AutoConfig.register('''custom''' , _a )
            AutoTokenizer.register(_a , slow_tokenizer_class=_a )
            AutoTokenizer.register(_a , fast_tokenizer_class=_a )
            # If remote code is not set, the default is to use local
            snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local one.
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Legacy dynamic-tokenizer repos (code in a single module) still load.
        snake_case__ = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            snake_case__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a , use_fast=_a )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        with self.assertRaisesRegex(
            _a , '''bert-base is not a local folder and is not a valid model identifier''' ):
            snake_case__ = AutoTokenizer.from_pretrained('''bert-base''' )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        with self.assertRaisesRegex(
            _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            snake_case__ = AutoTokenizer.from_pretrained(_a , revision='''aaaaaa''' )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Make sure we have cached the tokenizer.
        snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 33 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map each slow tokenizer class name to its fast counterpart,
# e.g. "BertTokenizer" -> transformers.BertTokenizerFast.
# Fix: both statements previously bound the same obfuscated name `a`, while
# the converter below reads `logger` and `TOKENIZER_CLASSES`.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow-tokenizer checkpoints and re-save them as fast tokenizers.

    Args:
        tokenizer_name: key into ``TOKENIZER_CLASSES`` or None for all of them.
        checkpoint_name: a single checkpoint id, or None for every canonical
            checkpoint known for the tokenizer.
        dump_path: output directory (org-prefixed checkpoints get a subdir).
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.

    NOTE(review): reconstructed from obfuscated source whose signature repeated
    the parameter name `__magic_name__` four times (a SyntaxError); argument
    order matches the call in the ``__main__`` block below.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        # Fix: `tokenizer_class` is already a class, so the original
        # `tokenizer_class.__class__.__name__` always logged "type".
        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # legacy_format=False writes the consolidated tokenizer.json
            # (matches the removal loop below) — confirm against upstream.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the consolidated fast-tokenizer file.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Backward-compatible alias for the previous (obfuscated) public name.
lowercase = convert_slow_checkpoint_to_fast
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    # Fix: the parse result was previously bound to the obfuscated name `a`
    # while the call below read the undefined name `args`.
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
"""simple docstring"""
import random
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
if index >= len(_lowercase ) or index < 0:
return None
UpperCamelCase = items[random.randint(0 ,len(_lowercase ) - 1 )]
UpperCamelCase = 0
UpperCamelCase , UpperCamelCase , UpperCamelCase = _partition(_lowercase ,_lowercase )
UpperCamelCase = len(_lowercase )
UpperCamelCase = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase ,_lowercase )
# must be in larger
else:
return quick_select(_lowercase ,index - (m + count) ) | 34 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """Agent tool that answers an English question about an image with a VQA model.

    NOTE(review): reconstructed from obfuscated source — the base class was the
    undefined name `lowercase__` while the imported `PipelineTool` was unused,
    and all attributes/methods shared placeholder names. Attribute and method
    names follow the PipelineTool contract — confirm against `.base`.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Pillow ("vision" backend) is needed to handle the image input.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Featurize the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model and return the raw answer logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit index to its answer label."""
        idx = outputs.argmax(-1).item()
        # Fix: the config attribute is `id2label` (obfuscated source had `idalabel`).
        return self.model.config.id2label[idx]


# Backward-compatible alias for the previous (obfuscated) class name.
UpperCamelCase__ = ImageQuestionAnsweringTool
| 679 | 0 |
from functools import reduce
# The 1000-digit number from Project Euler problem 8.
a_ = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def solution(n: str = a_) -> int:
    """Return the largest product of thirteen adjacent digits in *n*.

    NOTE(review): reconstructed from obfuscated source whose lambda repeated a
    parameter name (a SyntaxError) and referenced the undefined names `N`/`n`.

    >>> solution()
    23514624000
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


# Backward-compatible alias for the previous (obfuscated) function name.
a = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 35 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
# Module-level logger (obfuscated binding name kept; nothing in this
# chunk reads it — presumably other parts of the original file do).
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Replace dotted list indices in a PyTorch param name with underscores.

    E.g. ``"layers.0.weight"`` -> ``"layers_0.weight"``, so that splitting on
    "." later yields Flax-style key tuples.

    NOTE(review): renamed from the obfuscated `lowercase` (which was shadowed
    by the later definitions); the converter below calls it by this name.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key (tuple of parts) to its Flax equivalent and
    reshape/transpose the tensor when necessary.

    Returns:
        (renamed_key_tuple, tensor) — the tensor is transposed for conv/linear
        kernels, otherwise returned unchanged.

    NOTE(review): reconstructed from obfuscated source whose signature repeated
    `__magic_name__` three times (a SyntaxError); body names were intact.
    """
    # conv/group norm or layer norm: PyTorch "bias" maps to Flax "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: 4D "weight" becomes a "kernel" with axes moved to HWIO order
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: "weight" becomes a transposed "kernel"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def lowercase ( pt_state_dict , flax_model , init_key=42 ):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    Fix: the original declared three parameters all named ``__magic_name__``
    (a SyntaxError) and assigned intermediates to junk names.

    NOTE(review): relies on ``PRNGKey``, ``flatten_dict``, ``unflatten_dict``,
    ``jnp`` and the two renaming helpers being importable at module level —
    confirm the helper names in the restored module (here they appear as
    ``rename_key`` / ``rename_key_and_reshape_tensor``, matching the calls
    in the original body).
    """
    # Step 1: Convert pytorch tensors to numpy.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters (and transpose conv/linear kernels).
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 679 | 0 |
from collections import defaultdict
def lowercase ( __A : int ) -> int:
    """Depth-first count of the nodes in the subtree rooted at node ``__A``.

    Marks each visited node in the module-level ``visited`` dict and, when a
    subtree has an even number of nodes, records its root in ``cuts`` (the
    edge above it can be severed while keeping all components even-sized).

    Fixes: ``ret`` was never initialised, ``visited`` was never updated, the
    loop indexed ``tree`` with an undefined ``start``, and the recursion
    called an undefined ``dfs`` with the parent node instead of the child.

    NOTE(review): relies on module globals ``tree``, ``visited`` and ``cuts``
    defined in the ``__main__`` section below.
    """
    ret = 1  # count this node itself
    visited[__A] = True
    for v in tree[__A]:
        if v not in visited:
            ret += lowercase(v )
    if ret % 2 == 0:
        cuts.append(__A )
    return ret
def lowercase ( ) -> Union[str, Any]:
    """Kick off the even-tree DFS from root node 1.

    NOTE(review): ``dfs`` is not defined under that name in this file — the
    DFS helper above is also named ``lowercase``; confirm the intended name.
    """
    dfs(1 )
if __name__ == "__main__":
    # NOTE(review): an annotated assignment with a tuple target (below) is a
    # SyntaxError, and the names read later (``tree``, ``visited``, ``cuts``,
    # ``edges``, ``even_tree``) are never bound under those names — the
    # assignment targets were apparently mangled; confirm against the original.
    __lowercase , __lowercase : Union[str, Any] = 10, 9
    __lowercase : Dict = defaultdict(list)
    __lowercase : dict[int, bool] = {}
    __lowercase : list[int] = []
    __lowercase : Optional[Any] = 0
    # Undirected edges of the sample 10-node tree.
    __lowercase : Optional[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # Number of removable edges is one less than the number of even subtrees.
    print(len(cuts) - 1)
| 36 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase__ ):
    """Tests for ``EulerDiscreteScheduler``.

    NOTE(review): the base class name ``lowercase__`` is unbound here —
    presumably ``SchedulerCommonTest`` from the sibling test module; the
    method bodies also read names (``config``, ``scheduler`` …) whose
    assignment targets were mangled to ``UpperCAmelCase``. Code left as-is.
    """
    # Scheduler classes exercised by the common-test machinery.
    SCREAMING_SNAKE_CASE__ : Dict = (EulerDiscreteScheduler,)
    # Number of denoising steps used by the loop tests below.
    SCREAMING_SNAKE_CASE__ : List[Any] = 10
    def A_ ( self , **snake_case ):
        """Build a default scheduler config, overridable via kwargs."""
        UpperCAmelCase : List[Any] = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**snake_case )
        return config
    def A_ ( self ):
        """Config check across several training-timestep counts."""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=snake_case )
    def A_ ( self ):
        """Config check across beta start/end pairs."""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
    def A_ ( self ):
        """Config check across beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=snake_case )
    def A_ ( self ):
        """Config check across prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case )
    def A_ ( self ):
        """Full denoising loop with the default config; pins sum/mean of the output."""
        UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
        UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Union[str, Any] = self.dummy_model()
        UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : Any = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : Tuple = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : List[Any] = model(snake_case , snake_case )
            UpperCAmelCase : str = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Dict = output.prev_sample
        UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def A_ ( self ):
        """Denoising loop with ``v_prediction``; pins sum/mean of the output."""
        UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
        UpperCAmelCase : List[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = self.dummy_model()
        UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : int = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : Dict = model(snake_case , snake_case )
            UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Any = output.prev_sample
        UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
    def A_ ( self ):
        """Denoising loop with timesteps placed on an explicit device."""
        UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase : Optional[int] = self.get_scheduler_config()
        UpperCAmelCase : Any = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
        UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase : int = self.dummy_model()
        UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase : str = sample.to(snake_case )
        for t in scheduler.timesteps:
            UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : List[Any] = model(snake_case , snake_case )
            UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Dict = output.prev_sample
        UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def A_ ( self ):
        """Denoising loop with Karras sigmas enabled; pins sum/mean of the output."""
        UpperCAmelCase : Dict = self.scheduler_classes[0]
        UpperCAmelCase : Tuple = self.get_scheduler_config()
        UpperCAmelCase : Dict = scheduler_class(**snake_case , use_karras_sigmas=snake_case )
        scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
        UpperCAmelCase : List[str] = torch.manual_seed(0 )
        UpperCAmelCase : Any = self.dummy_model()
        UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase : List[str] = sample.to(snake_case )
        for t in scheduler.timesteps:
            UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : Dict = model(snake_case , snake_case )
            UpperCAmelCase : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : List[str] = output.prev_sample
        UpperCAmelCase : int = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 679 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# Module logger plus INFO-level logging for both stdlib and transformers.
# NOTE(review): the assignment target was mangled — later code reads ``logger``.
UpperCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase_ ( __a ) -> Any:
    """Infer the RAG model type from a model name or path.

    Returns ``"rag_token"``, ``"rag_sequence"``, ``"bart"`` (checked in that
    order), or ``None`` when no marker substring is present.

    Fix: the original tested ``model_name_or_path``, which is not bound —
    the parameter is ``__a``.
    """
    if "token" in __a:
        return "rag_token"
    if "sequence" in __a:
        return "rag_sequence"
    if "bart" in __a:
        return "bart"
    return None
def UpperCamelCase_ ( metric_fn , prediction , ground_truths ) -> Any:
    """Return the best ``metric_fn(prediction, gt)`` score over all ground truths.

    Fix: the original declared three parameters all named ``__a`` (a
    SyntaxError) and never passed ``gt`` into the metric.
    """
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
    """Compute corpus-level exact-match and F1 of predictions against gold answers.

    NOTE(review): the duplicated ``__a`` parameters are a SyntaxError, and the
    body reads ``args``/``data``/``answers``/``hypos`` that are never bound —
    the original signature was presumably ``(args, preds_path, gold_data_path)``
    with properly named locals; confirm before relying on this. Code left as-is.
    """
    a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
    a__ : Tuple = []
    if args.gold_data_mode == "qa":
        # "qa" gold files are TSV: question \t python-literal list of answers.
        a__ : Any = pd.read_csv(__a , sep="\t" , header=__a )
        for answer_list in data[1]:
            a__ : Union[str, Any] = ast.literal_eval(__a )
            answers.append(__a )
    else:
        # "ans" gold files hold one expected answer string per line.
        a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()]
        a__ : List[str] = [[reference] for reference in references]
    a__ : List[str] = 0
    for prediction, ground_truths in zip(__a , __a ):
        total += 1
        em += metric_max_over_ground_truths(__a , __a , __a )
        fa += metric_max_over_ground_truths(__a , __a , __a )
    a__ : Dict = 100.0 * em / total
    a__ : Optional[Any] = 100.0 * fa / total
    logger.info(f'''F1: {fa:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
    """Compute precision@k of retrieved provenance strings against references.

    Each line holds tab-separated document titles; the top-``k`` hypothesis
    titles are intersected with the reference titles.

    NOTE(review): duplicated ``__a`` parameters are a SyntaxError and the body
    reads unbound names (``args``, ``k``, ``em``, ``total`` …) — assignment
    targets were mangled; confirm against the original. Code left as-is.
    """
    a__ : Optional[Any] = args.k
    a__ : str = [line.strip() for line in open(__a , "r" ).readlines()]
    a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
    a__ : Tuple = 0
    for hypo, reference in zip(__a , __a ):
        a__ : Any = set(hypo.split("\t" )[:k] )
        a__ : Union[str, Any] = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    a__ : Union[str, Any] = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
    """Retrieve documents for a batch of questions and return one
    tab-joined string of (un-quoted) document titles per question.

    NOTE(review): duplicated ``__a`` parameters are a SyntaxError and the body
    reads unbound names (``rag_model``, ``args``, ``title`` …) — assignment
    targets were mangled; confirm against the original. Code left as-is.
    """
    def strip_title(__a ):
        # Drop surrounding double quotes from a document title.
        if title.startswith("\"" ):
            a__ : Optional[Any] = title[1:]
        if title.endswith("\"" ):
            a__ : Union[str, Any] = title[:-1]
        return title
    a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device )
    a__ : Optional[int] = rag_model.rag.question_encoder(__a )
    a__ : Union[str, Any] = question_enc_outputs[0]
    a__ : Optional[int] = rag_model.retriever(
        __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    a__ : int = []
    for docs in all_docs:
        a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]]
        provenance_strings.append("\t".join(__a ) )
    return provenance_strings
def UpperCamelCase_ ( __a , __a , __a ) -> Dict:
    """Generate answers for a batch of questions with a RAG model (no grad).

    NOTE(review): duplicated ``__a`` parameters are a SyntaxError and the body
    reads unbound names (``rag_model``, ``args``, ``inputs_dict`` …) —
    assignment targets were mangled; confirm against the original. Code left as-is.
    """
    with torch.no_grad():
        a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            __a , return_tensors="pt" , padding=__a , truncation=__a )
        a__ : Any = inputs_dict.input_ids.to(args.device )
        a__ : Dict = inputs_dict.attention_mask.to(args.device )
        a__ : Optional[int] = rag_model.generate( # rag_model overwrites generate
            __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a )
        if args.print_predictions:
            for q, a in zip(__a , __a ):
                logger.info("Q: {} - A: {}".format(__a , __a ) )
        return answers
def UpperCamelCase_ ( ) -> List[str]:
    """Build and parse the command-line arguments for RAG evaluation.

    NOTE(review): the body reads ``parser``/``args`` which are never bound
    under those names — assignment targets were mangled. Code left as-is.
    """
    a__ : int = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
    a__ : int = parser.parse_args()
    a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def UpperCamelCase_ ( __a ) -> Optional[int]:
    """Run the RAG/BART evaluation over one or more checkpoints.

    Picks model class and scoring/eval functions from ``args``, batches the
    evaluation set, writes predictions, then scores them against the gold data.

    NOTE(review): the body reads ``args``/``model_kwargs``/``checkpoints`` …
    that are never bound under those names — assignment targets were mangled.
    Code left as-is.
    """
    a__ : Tuple = {}
    if args.model_type is None:
        a__ : List[str] = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        a__ : Tuple = args.n_docs
        if args.index_name is not None:
            a__ : Any = args.index_name
        if args.index_path is not None:
            a__ : int = args.index_path
    else:
        a__ : Optional[Any] = BartForConditionalGeneration
    a__ : Tuple = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , __a )
    a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        # Reuse an existing predictions file unless --recalculate is given.
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(__a , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(__a ) )
        logger.info("  Batch size = %d" , args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            a__ : str = RagRetriever.from_pretrained(__a , **__a )
            a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a )
            model.retriever.init_retrieval()
        else:
            a__ : Dict = model_class.from_pretrained(__a , **__a )
        model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            a__ : List[Any] = []
            for line in tqdm(__a ):
                questions.append(line.strip() )
                if len(__a ) == args.eval_batch_size:
                    a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a )
                    preds_file.write("\n".join(__a ) + "\n" )
                    preds_file.flush()
                    a__ : Any = []
            if len(__a ) > 0:
                a__ : List[str] = evaluate_batch_fn(__a , __a , __a )
                preds_file.write("\n".join(__a ) )
                preds_file.flush()
            score_fn(__a , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # NOTE(review): ``get_args``/``main``/``args`` are not defined under those
    # names in this file (functions were renamed to ``UpperCamelCase_`` and the
    # assignment target mangled); confirm against the original script.
    UpperCamelCase : List[Any] = get_args()
    main(args)
| 37 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( lowercase__ ):
    """Source-lint checks over ``./datasets/**/*.py``: every ``open(...)`` must
    pass an explicit encoding (or binary mode) and no bare ``print(`` calls.

    NOTE(review): the base class name ``lowercase__`` is unbound (presumably
    ``TestCase``), and the assert methods call ``self._no_encoding_on_file_open``
    / ``self._no_print_statements`` although all methods here are named ``A_`` —
    names were mangled; confirm against the original.
    """
    def A_ ( self , snake_case ):
        """Return a regex match if the file calls ``open(...)`` without an
        explicit encoding or binary/write mode."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            UpperCAmelCase : Dict = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            UpperCAmelCase : Tuple = input_file.read()
            UpperCAmelCase : List[Any] = regexp.search(snake_case )
        return match
    def A_ ( self , snake_case ):
        """Return a match object if the file contains a real ``print(`` call
        (occurrences inside comments or string literals are ignored)."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            UpperCAmelCase : List[str] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            UpperCAmelCase : List[Any] = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            UpperCAmelCase : str = regexp.finditer(snake_case )
            UpperCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def A_ ( self ):
        """Fail if any dataset script opens a file without utf-8 encoding."""
        UpperCAmelCase : Dict = Path("./datasets" )
        UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(snake_case ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
    def A_ ( self ):
        """Fail if any dataset script contains a bare ``print`` statement."""
        UpperCAmelCase : Union[str, Any] = Path("./datasets" )
        UpperCAmelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(snake_case ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
A_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase__ ( __magic_name__ : str ) -> Union[str, Any]:
    """Guess the pipeline data format from a file path's extension.

    Returns ``"pipe"`` for an empty/None path, otherwise the first supported
    extension the path ends with; raises when none matches.

    Fix: the original read ``path`` instead of the ``__magic_name__``
    parameter (a NameError at runtime).
    """
    if not __magic_name__:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if __magic_name__.endswith(__magic_name__ if False else ext ):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {__magic_name__}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> int:
    """Factory for the ``run`` CLI subcommand: build a pipeline and a data
    reader from parsed arguments and return a ``RunCommand``.

    NOTE(review): the body reads ``args``/``nlp``/``format``/``reader`` that
    are never bound under those names — the parameter was presumably ``args``
    and the assignment targets were mangled; confirm. Code left as-is.
    """
    snake_case__ : Optional[Any] = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    snake_case__ : List[Any] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
    snake_case__ : Dict = PipelineDataFormat.from_str(
        format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(__magic_name__ , __magic_name__ )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """CLI command that feeds every entry of a data reader through a pipeline
    and saves the outputs (presumably the transformers ``run`` subcommand;
    the base class name ``__SCREAMING_SNAKE_CASE`` is unbound here —
    NOTE(review): confirm it was ``BaseTransformersCLICommand``)."""
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        # NOTE(review): reads ``nlp``/``reader`` although the parameters were
        # mangled to ``__SCREAMING_SNAKE_CASE``; later code uses
        # ``self._nlp`` / ``self._reader``.
        snake_case__ : Optional[int] = nlp
        snake_case__ : Union[str, Any] = reader
    @staticmethod
    def __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
        """Register the ``run`` subparser and all of its CLI options."""
        snake_case__ : Optional[Any] = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
        run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
        run_parser.add_argument("""--input""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file to use for inference""" )
        run_parser.add_argument("""--output""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file that will be used post to write results.""" )
        run_parser.add_argument("""--model""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model to instantiate.""" )
        run_parser.add_argument("""--config""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model's config to instantiate.""" )
        run_parser.add_argument(
            """--tokenizer""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the tokenizer to use. (default: same as the model name)""" )
        run_parser.add_argument(
            """--column""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
        run_parser.add_argument(
            """--format""" , type=__SCREAMING_SNAKE_CASE , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
        run_parser.add_argument(
            """--device""" , type=__SCREAMING_SNAKE_CASE , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
        run_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
    def __UpperCamelCase ( self ):
        """Run the pipeline over every reader entry and save the results
        (binary or plain, depending on the pipeline's output type)."""
        snake_case__ , snake_case__ : List[Any] = self._nlp, []
        for entry in self._reader:
            snake_case__ : Tuple = nlp(**__SCREAMING_SNAKE_CASE ) if self._reader.is_multi_columns else nlp(__SCREAMING_SNAKE_CASE )
            if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                outputs.append(__SCREAMING_SNAKE_CASE )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            snake_case__ : int = self._reader.save_binary(__SCREAMING_SNAKE_CASE )
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
        else:
            self._reader.save(__SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase__ ):
    """Encoder variant that can run a single transformer layer at a time,
    enabling patience-based early exit (PABEE).

    NOTE(review): the base class name ``lowercase__`` is unbound — presumably
    ``BertEncoder`` from the imports above."""
    def A_ ( self , snake_case , snake_case , snake_case=None , snake_case=None ):
        """Run only layer ``current_layer`` and return its hidden states.

        NOTE(review): reads ``current_layer``/``head_mask`` although the
        parameter names were mangled to ``snake_case``; confirm."""
        UpperCAmelCase : Tuple = self.layer[current_layer](snake_case , snake_case , head_mask[current_layer] )
        UpperCAmelCase : Optional[int] = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """Bert model with Patience-based Early Exit (PABEE) inference.

    NOTE(review): the base class name ``lowercase__`` is unbound — presumably
    ``BertModel``; the bodies read names whose assignment targets were mangled
    to ``UpperCAmelCase``. Code left as-is.
    """
    def __init__( self , snake_case ):
        """Build the PABEE encoder and zero the early-exit statistics."""
        super().__init__(snake_case )
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        # patience / regression threshold and inference counters
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0
    def A_ ( self , snake_case ):
        """Set the regression early-exit threshold."""
        UpperCAmelCase : List[Any] = threshold
    def A_ ( self , snake_case ):
        """Set the patience (number of consecutive agreeing exits required)."""
        UpperCAmelCase : str = patience
    def A_ ( self ):
        """Reset the inference statistics counters."""
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0
    def A_ ( self ):
        """Print the average number of layers used per inference and the speed-up."""
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        """Forward pass with patience-based early exit.

        Training: returns per-layer classifier outputs. ``patience == 0``:
        runs the full encoder. Otherwise: exits once ``patience`` consecutive
        layer classifiers agree (exact label match, or within
        ``regression_threshold`` for regression)."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0: # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE Bert for sequence classification/regression with one linear head
    per hidden layer (each head is a potential early exit).

    NOTE(review): the base class name ``lowercase__`` is unbound — presumably
    ``BertPreTrainedModel``; assignment targets were mangled. Code left as-is.
    """
    def __init__( self , snake_case ):
        """Build the PABEE backbone, dropout, and per-layer classifier heads."""
        super().__init__(snake_case )
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        # One classifier per hidden layer so every layer can serve as an exit.
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        """Forward: classify at each exit layer; when labels are given, return
        a depth-weighted (MSE for regression, cross-entropy otherwise) loss."""
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    # Deeper exits get a proportionally larger weight (ix + 1).
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
# Canonical file names for the tokenizer's serialized assets.
lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Download URLs for each asset, per pretrained GPT-2 checkpoint.
lowerCAmelCase_ = {
    '''vocab_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
    },
}
# Maximum input length (positional embeddings) per checkpoint.
lowerCAmelCase_ = {
    '''gpt2''': 10_24,
    '''gpt2-medium''': 10_24,
    '''gpt2-large''': 10_24,
    '''gpt2-xl''': 10_24,
    '''distilgpt2''': 10_24,
}
class snake_case_ ( __A ):
    """Fast (Rust-backed) GPT-2 byte-level BPE tokenizer.

    NOTE(review): this block is heavily name-mangled. The five class
    attributes below all assign to the same name ``SCREAMING_SNAKE_CASE``
    (only the last survives), locals collapse to ``snake_case_``, and several
    names read here (``VOCAB_FILES_NAMES``, ``pre_tok_state``,
    ``add_prefix_space``, ``pre_tok_class``, ``kwargs``) are never bound in
    this file — compare with the upstream GPT2TokenizerFast before relying on
    any behavior documented below.
    """
    SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : Union[str, Any] = ["input_ids", "attention_mask"]
    SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer

    def __init__( self : List[str] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int="<|endoftext|>" , _UpperCamelCase : List[Any]="<|endoftext|>" , _UpperCamelCase : Any="<|endoftext|>" , _UpperCamelCase : List[str]=False , **_UpperCamelCase : Tuple , ) ->Optional[Any]:
        """Build the tokenizer and re-create the byte-level pre-tokenizer when
        the requested ``add_prefix_space`` differs from the serialized one."""
        super().__init__(
            _UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
        snake_case_ = kwargs.pop('''add_bos_token''' , _UpperCamelCase )
        # Inspect the backend pre-tokenizer's serialized state (JSON string).
        snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , _UpperCamelCase ) != add_prefix_space:
            # Rebuild the pre-tokenizer class with the requested flag.
            snake_case_ = getattr(_UpperCamelCase , pre_tok_state.pop('''type''' ) )
            snake_case_ = add_prefix_space
            snake_case_ = pre_tok_class(**_UpperCamelCase )
        snake_case_ = add_prefix_space

    def snake_case__( self : Tuple , *_UpperCamelCase : List[Any] , **_UpperCamelCase : List[str] ) ->BatchEncoding:
        """Batch-encode; pretokenized input requires ``add_prefix_space=True``."""
        snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )

    def snake_case__( self : str , *_UpperCamelCase : Any , **_UpperCamelCase : List[str] ) ->BatchEncoding:
        """Encode a single example; same prefix-space constraint as above."""
        snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
        """Serialize the vocabulary/merges to disk; returns the written paths."""
        snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
        return tuple(_UpperCamelCase )

    def snake_case__( self : str , _UpperCamelCase : "Conversation" ) ->List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        truncating from the left to ``model_max_length``."""
        snake_case_ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
        if len(_UpperCamelCase ) > self.model_max_length:
            snake_case_ = input_ids[-self.model_max_length :]
        return input_ids
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def lowercase ( __magic_name__ ):
    """Exact Gaussian Error Linear Unit: ``x * Phi(x)`` with the normal CDF
    computed via ``erf``.

    NOTE(review): the incoming body bound every result to a throwaway local
    (``UpperCAmelCase``) and then read undefined names ``x``/``cdf`` — a
    guaranteed NameError; working local bindings are restored here.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """Smooth (tanh) approximation of GELU, as used by GPT-2/BERT
    (https://arxiv.org/abs/1606.08415).

    NOTE(review): restores the local names (``x``, ``pi``, ``coeff``, ``cdf``)
    that the mangled body read without ever binding.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """Mish activation: ``x * tanh(softplus(x))``
    (https://arxiv.org/abs/1908.08681).

    NOTE(review): the mangled body read an undefined ``x``; the converted
    tensor is now bound and used consistently.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def lowercase ( __magic_name__ ):
    """Fast tanh-based GELU approximation using precomputed constants
    ``0.044715`` and ``sqrt(2/pi) ~= 0.7978845608``.

    NOTE(review): the mangled body assigned both constants to names it never
    read and used ``coeffa`` twice; distinct coefficients are restored.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeffa = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeffb = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def lowercase ( __magic_name__ ):
    """Quick GELU: ``x * sigmoid(1.702 * x)`` — a cheap sigmoid approximation.

    NOTE(review): restores the ``x``/``coeff`` bindings the mangled body read
    without defining.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def lowercase ( __magic_name__ ):
    """GELU clipped to the range [-10, 10] (useful for quantization).

    NOTE(review): the incoming body called ``_gelu``, which does not exist in
    this mangled file (every helper is named ``lowercase``); the exact
    erf-based GELU is inlined here so the block stands on its own.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return tf.clip_by_value(x * cdf , -10 , 10 )
def lowercase ( __magic_name__ , axis=-1 ):
    """Gated Linear Unit (https://arxiv.org/abs/1612.08083): split the tensor
    into two halves along ``axis`` and gate the first with the sigmoid of the
    second.

    NOTE(review): the incoming signature declared ``__magic_name__`` twice —
    a SyntaxError — and gated by ``sigmoid`` of the *whole input* instead of
    the second half; both defects are fixed (second parameter renamed to
    ``axis``, same ``-1`` default).
    """
    a , b = tf.split(__magic_name__ , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def lowercase ( __magic_name__ ):
        """Wrap Keras' fused GELU (available from TF 2.4).

        NOTE(review): passing the input tensor as ``approximate=`` looks like
        a mangling artifact — upstream passes ``approximate=True`` here;
        confirm before use.
        """
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )
    # NOTE(review): both assignments below target the same name ``a``, and
    # ``approximate_gelu_wrap`` / ``_gelu`` / ``_gelu_new`` are never defined
    # in this file (every helper above is named ``lowercase``) — presumably
    # these were once ``gelu`` and ``gelu_new``; verify against upstream.
    a : Tuple = tf.keras.activations.gelu
    a : Dict = approximate_gelu_wrap
else:
    a : List[str] = _gelu
    a : List[Any] = _gelu_new
# String-name -> TF activation-callable registry consumed by the lookup
# helper below.
# NOTE(review): the values ``gelu``, ``gelu_aa``, ``gelu_fast``, ``gelu_new``,
# ``glu``, ``mish`` and ``quick_gelu`` are never bound under those names in
# this mangled file (all helpers above collapsed to ``lowercase``) — evaluating
# this literal would raise NameError; confirm against the upstream module.
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
    """Resolve an activation name to its TF callable.

    Args:
        __magic_name__: registry key, e.g. ``"gelu"`` or ``"swish"``.

    Raises:
        KeyError: if the name is not present in the ``ACTaFN`` registry.

    NOTE(review): the incoming body read an undefined ``activation_string``
    instead of its parameter (a guaranteed NameError); the parameter is now
    used throughout, including in the error message.
    """
    if __magic_name__ in ACTaFN:
        return ACTaFN[__magic_name__]
    raise KeyError(F"function {__magic_name__} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 679 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
__UpperCAmelCase = True
from torch.cuda.amp import autocast
__UpperCAmelCase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ : List[str]=None , snake_case__ : List[str]=None ) -> Union[str, Any]:
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class lowerCAmelCase_ :
    """Arguments selecting and configuring the pretrained model to fine-tune.

    NOTE(review): every field below is declared under the same mangled name
    ``UpperCAmelCase__``, so later annotations shadow earlier ones and only
    the last declaration survives as a real dataclass field — presumably these
    were once distinct names (model_name_or_path, cache_dir, dropouts, ...);
    confirm against the original fine-tuning script.
    """

    # Required: hub id or local path of the pretrained checkpoint.
    UpperCAmelCase__ : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    UpperCAmelCase__ : Optional[str] = field(
        default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    UpperCAmelCase__ : Optional[bool] = field(
        default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    UpperCAmelCase__ : Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
    UpperCAmelCase__ : Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    UpperCAmelCase__ : Optional[float] = field(
        default=0.1 , metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        } , )
    UpperCAmelCase__ : Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
    UpperCAmelCase__ : Optional[float] = field(
        default=0.05 , metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        } , )
    UpperCAmelCase__ : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
    """Arguments describing the dataset and preprocessing for fine-tuning.

    NOTE(review): as with the model arguments above, every field is declared
    under the same mangled name ``UpperCAmelCase__`` so only the last one
    survives as a real dataclass field; confirm against the original script.
    """

    UpperCAmelCase__ : Optional[str] = field(
        default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    UpperCAmelCase__ : Optional[str] = field(
        default="train+validation" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    UpperCAmelCase__ : bool = field(
        default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    UpperCAmelCase__ : Optional[int] = field(
        default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
    UpperCAmelCase__ : Optional[int] = field(
        default=a__ , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    UpperCAmelCase__ : Optional[int] = field(
        default=a__ , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        } , )
    # Punctuation/special characters stripped from transcripts before training.
    UpperCAmelCase__ : List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class lowerCAmelCase_ :
    """Dynamic-padding data collator for CTC training.

    Pads audio inputs and label ids to batch tensors separately (they have
    different lengths/padding rules) and masks label padding with -100 so the
    CTC loss ignores it.

    NOTE(review): the field declarations below all share the mangled name
    ``UpperCAmelCase__``; presumably they were processor / padding /
    max_length / max_length_labels / pad_to_multiple_of(_labels) — the
    ``__call__`` body reads those attribute names directly.
    """

    UpperCAmelCase__ : WavaVecaProcessor
    UpperCAmelCase__ : Union[bool, str] = True
    UpperCAmelCase__ : Optional[int] = None
    UpperCAmelCase__ : Optional[int] = None
    UpperCAmelCase__ : Optional[int] = None
    UpperCAmelCase__ : Optional[int] = None

    def __call__( self, SCREAMING_SNAKE_CASE_ ) -> Dict[str, torch.Tensor]:
        """Collate a list of feature dicts into a padded training batch."""
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        UpperCamelCase : Any = [{'input_values': feature['input_values']} for feature in features]
        UpperCamelCase : int = [{'input_ids': feature['labels']} for feature in features]
        UpperCamelCase : Dict = self.processor.pad(
            SCREAMING_SNAKE_CASE_, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        UpperCamelCase : Optional[int] = self.processor.pad(
            labels=SCREAMING_SNAKE_CASE_, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt', )
        # replace padding with -100 to ignore loss correctly
        UpperCamelCase : Optional[int] = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ), -100 )
        UpperCamelCase : Dict = labels
        return batch
class lowerCAmelCase_ ( a__ ):
    """Trainer subclass with a custom CTC training step.

    Handles native-AMP autocast, apex, and deepspeed backward paths, and
    normalizes the CTC loss across GPUs according to the model's
    ``ctc_loss_reduction`` setting.
    """

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
        """Run one training step and return the detached loss.

        NOTE(review): locals are mangled to ``UpperCamelCase`` while later
        lines read names like ``loss`` / ``inputs`` — compare with the
        upstream ``CTCTrainer.training_step`` before relying on this body.
        """
        model.train()
        UpperCamelCase : Optional[Any] = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
        if self.use_amp:
            with autocast():
                UpperCamelCase : str = self.compute_loss(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        else:
            UpperCamelCase : Any = self.compute_loss(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        if self.args.n_gpu > 1:
            # DataParallel returns one loss per GPU; reduce per the CTC setting.
            if model.module.config.ctc_loss_reduction == "mean":
                UpperCamelCase : Optional[Any] = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                UpperCamelCase : Optional[int] = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            UpperCamelCase : List[str] = loss / self.args.gradient_accumulation_steps
        # Pick the backward path matching the active mixed-precision backend.
        if self.use_amp:
            self.scaler.scale(SCREAMING_SNAKE_CASE_ ).backward()
        elif self.use_apex:
            with amp.scale_loss(SCREAMING_SNAKE_CASE_, self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(SCREAMING_SNAKE_CASE_ )
        else:
            loss.backward()
        return loss.detach()
def UpperCamelCase ( ) -> Any:
    """End-to-end Wav2Vec2 CTC fine-tuning on Common Voice: parse arguments,
    build the vocabulary/tokenizer/processor, preprocess audio, train, and
    evaluate (WER).

    NOTE(review): this body is name-mangled — locals are all assigned to
    ``UpperCamelCase`` while later lines read names like ``parser``,
    ``training_args``, ``train_dataset`` ..., and it instantiates
    ``DataCollatorCTCWithPadding`` / ``CTCTrainer`` although the classes above
    carry the mangled name ``lowerCAmelCase_``. Compare with the upstream
    ``run_common_voice.py`` before relying on any of it.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase , UpperCamelCase , UpperCamelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    UpperCamelCase : Union[str, Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCamelCase : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , snake_case__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    UpperCamelCase : Optional[int] = datasets.load_dataset(
        'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
    UpperCamelCase : str = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
    # Create and save tokenizer
    UpperCamelCase : List[Any] = F"""[{"".join(data_args.chars_to_ignore )}]"""
    def remove_special_characters(snake_case__ : Union[str, Any] ):
        # Strip ignored characters and lower-case; trailing space acts as a
        # word delimiter for CTC.
        UpperCamelCase : str = re.sub(snake_case__ , '' , batch['sentence'] ).lower() + ' '
        return batch
    UpperCamelCase : List[Any] = train_dataset.map(snake_case__ , remove_columns=['sentence'] )
    UpperCamelCase : Dict = eval_dataset.map(snake_case__ , remove_columns=['sentence'] )
    def extract_all_chars(snake_case__ : Dict ):
        # Collect the character inventory of a batch to build the vocabulary.
        UpperCamelCase : Union[str, Any] = ' '.join(batch['text'] )
        UpperCamelCase : List[str] = list(set(snake_case__ ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    UpperCamelCase : Any = train_dataset.map(
        snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=train_dataset.column_names , )
    UpperCamelCase : List[Any] = train_dataset.map(
        snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=eval_dataset.column_names , )
    UpperCamelCase : Tuple = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    UpperCamelCase : Optional[Any] = {v: k for k, v in enumerate(snake_case__ )}
    # CTC convention: replace the space character with the "|" word delimiter.
    UpperCamelCase : Optional[int] = vocab_dict[' ']
    del vocab_dict[" "]
    UpperCamelCase : Any = len(snake_case__ )
    UpperCamelCase : Dict = len(snake_case__ )
    with open('vocab.json' , 'w' ) as vocab_file:
        json.dump(snake_case__ , snake_case__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase : List[Any] = WavaVecaCTCTokenizer(
        'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    UpperCamelCase : Any = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ )
    UpperCamelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
    UpperCamelCase : str = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        UpperCamelCase : Optional[Any] = min(len(snake_case__ ) , data_args.max_train_samples )
        UpperCamelCase : Tuple = train_dataset.select(range(snake_case__ ) )
    if data_args.max_val_samples is not None:
        UpperCamelCase : Union[str, Any] = eval_dataset.select(range(data_args.max_val_samples ) )
    # Common Voice ships 48 kHz audio; Wav2Vec2 expects 16 kHz.
    UpperCamelCase : int = torchaudio.transforms.Resample(48000 , 16000 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(snake_case__ : int ):
        UpperCamelCase , UpperCamelCase : Tuple = torchaudio.load(batch['path'] )
        UpperCamelCase : Optional[Any] = resampler(snake_case__ ).squeeze().numpy()
        UpperCamelCase : Dict = 16000
        UpperCamelCase : Optional[int] = batch['text']
        return batch
    UpperCamelCase : Optional[int] = train_dataset.map(
        snake_case__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    UpperCamelCase : str = eval_dataset.map(
        snake_case__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(snake_case__ : Tuple ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
        UpperCamelCase : Dict = processor(
            audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
        batch.update(snake_case__ )
        return batch
    UpperCamelCase : int = train_dataset.map(
        snake_case__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
    UpperCamelCase : Dict = eval_dataset.map(
        snake_case__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    UpperCamelCase : List[str] = datasets.load_metric('wer' )
    def compute_metrics(snake_case__ : Union[str, Any] ):
        # Greedy-decode logits and compute word error rate against references.
        UpperCamelCase : str = pred.predictions
        UpperCamelCase : int = np.argmax(snake_case__ , axis=-1 )
        UpperCamelCase : List[Any] = processor.tokenizer.pad_token_id
        UpperCamelCase : int = processor.batch_decode(snake_case__ )
        # we do not want to group tokens when computing the metrics
        UpperCamelCase : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=snake_case__ )
        UpperCamelCase : Dict = wer_metric.compute(predictions=snake_case__ , references=snake_case__ )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    UpperCamelCase : Optional[int] = DataCollatorCTCWithPadding(processor=snake_case__ , padding=snake_case__ )
    # Initialize our Trainer
    UpperCamelCase : Tuple = CTCTrainer(
        model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , compute_metrics=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            UpperCamelCase : Dict = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            UpperCamelCase : List[str] = model_args.model_name_or_path
        else:
            UpperCamelCase : Union[str, Any] = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        UpperCamelCase : Tuple = trainer.train(resume_from_checkpoint=snake_case__ )
        trainer.save_model()
        UpperCamelCase : Dict = train_result.metrics
        UpperCamelCase : Union[str, Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
        )
        UpperCamelCase : Union[str, Any] = min(snake_case__ , len(snake_case__ ) )
        trainer.log_metrics('train' , snake_case__ )
        trainer.save_metrics('train' , snake_case__ )
        trainer.save_state()
    # Evaluation
    UpperCamelCase : List[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        UpperCamelCase : int = trainer.evaluate()
        UpperCamelCase : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case__ )
        UpperCamelCase : Optional[Any] = min(snake_case__ , len(snake_case__ ) )
        trainer.log_metrics('eval' , snake_case__ )
        trainer.save_metrics('eval' , snake_case__ )
    return results
if __name__ == "__main__":
    # Script entry point. NOTE(review): the training routine above is defined
    # under the (mangled) name `UpperCamelCase` — `main` does not exist in this
    # module, so call the name that is actually bound.
    UpperCamelCase()
| 40 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
    """N-th order IIR (infinite impulse response) digital filter, direct form I:

        y[n] = (b0*x[n] + b1*x[n-1] + ... + bk*x[n-k]
                - a1*y[n-1] - ... - ak*y[n-k]) / a0

    NOTE(review): in the incoming code every assignment targeted a throwaway
    local (``UpperCAmelCase``) instead of ``self.*`` and both methods were
    named ``A_`` (the second shadowing the first), so the class could never
    work. This version restores working attribute state and distinct method
    names, keeping ``A_`` as a backward-compatible alias for ``process``.
    """

    def __init__( self , snake_case ):
        """Create an identity filter of order ``snake_case``."""
        self.order = snake_case
        # a_{0} ... a_{k}: feedback (denominator) coefficients
        self.a_coeffs = [1.0] + [0.0] * snake_case
        # b_{0} ... b_{k}: feed-forward (numerator) coefficients
        self.b_coeffs = [1.0] + [0.0] * snake_case
        # x[n-1] ... x[n-k]: most recent inputs, newest first
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]: most recent outputs, newest first
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ):
        """Install filter coefficients; a missing leading a_0 defaults to 1.0.

        Raises:
            ValueError: if either list does not end up with ``order + 1`` entries.
        """
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample ):
        """Feed one input sample through the filter and return the output."""
        result = 0.0
        # Start at index 1 and fold in index 0 at the end (it needs the new sample).
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift both histories one step and record the newest sample/output.
        self.input_history = [sample] + self.input_history[:-1]
        self.output_history = [result] + self.output_history[:-1]
        return result

    def A_( self , snake_case ):
        """Backward-compatible alias for :meth:`process` (original public name)."""
        return self.process(snake_case )
| 679 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = CodeGenTokenizer
SCREAMING_SNAKE_CASE : List[Any] = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Dict = {'add_prefix_space': True}
SCREAMING_SNAKE_CASE : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''<unk>'''}
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def SCREAMING_SNAKE_CASE ( self : str ,**lowercase__ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any] ):
__lowercase = '''lower newer'''
__lowercase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
__lowercase = '''lower newer'''
__lowercase = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = '''lower newer'''
# Testing tokenization
__lowercase = tokenizer.tokenize(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids without special tokens
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing conversion to ids with special tokens
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# Testing the unknown token
__lowercase = tokens + [rust_tokenizer.unk_token]
__lowercase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,*lowercase__ : str ,**lowercase__ : Optional[int] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(lowercase__ ,tokenizer_r.encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase__ ,tokenizer_r.batch_encode_plus ,lowercase__ ,max_length=lowercase__ ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input looooooooong''', '''This is a simple input''']
__lowercase = ('''This is a simple input''', '''This is a pair''')
__lowercase = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer(lowercase__ ,padding='''max_length''' ,max_length=3_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
__lowercase = tokenizer(*lowercase__ ,padding='''max_length''' ,max_length=6_0 ,return_tensors='''np''' )
__lowercase = tokenizer(lowercase__ ,padding=lowercase__ ,truncate=lowercase__ ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''$$$'''
__lowercase = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowercase__ ,add_bos_token=lowercase__ )
__lowercase = '''This is a simple input'''
__lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer(lowercase__ )
__lowercase = tokenizer(lowercase__ )
self.assertEqual(out_s.input_ids[0] ,lowercase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowercase = tokenizer.decode(out_s.input_ids )
__lowercase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowercase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
__lowercase = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
__lowercase = '''\nif len_a > len_b: result = a\nelse: result = b'''
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
__lowercase = tokenizer.decode(lowercase__ ,truncate_before_pattern=lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Intentionally a no-op: presumably overrides and disables an inherited
        # tokenizer test that does not apply here — TODO confirm against the
        # base test class.
        pass
| 41 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__=None ):
'''simple docstring'''
if fail is not None:
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase : Any = None
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : int = defaultdict(__magic_name__ )
for line in correct_lines:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned the parser to `a` and then
    # used the undefined names `parser` and `args`; restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 679 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises when `transformers`, `torch` or
    `note_seq` is missing.

    NOTE(review): the metaclass name `UpperCAmelCase__` was undefined; the
    otherwise-unused `DummyObject` import grounds the fix.  The two classmethods
    shared one name (the second silently shadowed the first); they are given the
    conventional dummy-object names — confirm against the real module.
    """

    # Backends the real class requires (read by DummyObject).
    _backends = ['transformers', 'torch', 'note_seq']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 42 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node carrying a coin count.

    NOTE(review): the obfuscated class was named ``UpperCamelCase__`` while its
    own field annotations and the consumer below use ``TreeNode`` and the
    attributes ``data``/``left``/``right``; all three fields also shared one
    name. Restored from those use sites.
    """

    data: int                      # number of coins stored at this node
    left: TreeNode | None = None   # left child (None for a leaf)
    right: TreeNode | None = None  # right child (None for a leaf)
# (moves, excess) pair returned by the recursive helper: `moves` is the number
# of coin moves performed in the subtree, `excess` is 1 + (coins - nodes) so a
# None child naturally contributes excess 1.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowercase(root):
    """Return the minimum number of moves to give every tree node exactly one coin.

    LeetCode 979 "Distribute Coins in Binary Tree": each move transfers one coin
    along one edge. Raises ``ValueError`` when the total number of coins does not
    equal the number of nodes.

    NOTE(review): the obfuscated original used the parameter name
    ``__magic_name__`` while its bodies read ``root``/``node``, and unpacked both
    halves of each ``get_distrib`` result into one rebound name while later lines
    read ``left_distrib_excess`` etc.; names restored from those read sites.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins that must flow across the edge to each child (sign = direction).
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    import doctest

    # Run any doctests in this module when executed directly.
    doctest.testmod()
| 679 | 0 |
from __future__ import annotations
from typing import TypedDict
class _a ( UpperCamelCase__ ):
_lowercase : str
_lowercase : int
def all_rotations(s):
    """Return every cyclic rotation of *s* (in rotation order, not sorted).

    Raises ``TypeError`` when *s* is not a string.

    NOTE(review): the obfuscated original named both functions here ``_a`` while
    callers use ``all_rotations``/``bwt_transform``; names restored from the
    call sites.
    """
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s):
    """Compute the Burrows–Wheeler transform of *s*.

    Returns a dict with the transformed string (``bwt_string``) and the index
    of the original string among the sorted rotations
    (``idx_original_string``), which is needed to invert the transform.
    Raises ``TypeError`` for non-strings and ``ValueError`` for the empty
    string.
    """
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')
    if not s:
        raise ValueError('''The parameter s must not be empty.''')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string, idx_original_string):
    """Invert a Burrows–Wheeler transform.

    Repeatedly prepends the BWT column to the (sorted) working rotations; after
    ``len(bwt_string)`` passes the rotations are fully reconstructed and the
    original string sits at ``idx_original_string``.

    Raises ``TypeError`` for a non-string ``bwt_string`` or an index that cannot
    be cast to int, and ``ValueError`` for an empty string or out-of-range index.

    NOTE(review): renamed from the obfuscated ``_a`` to match the call site in
    the ``__main__`` block; parameters restored from the error messages.
    """
    if not isinstance(bwt_string, str):
        raise TypeError('''The parameter bwt_string type must be str.''')
    if not bwt_string:
        raise ValueError('''The parameter bwt_string must not be empty.''')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            '''The parameter idx_original_string type must be int or passive'''
            ''' of cast to int.''')
    if idx_original_string < 0:
        raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')

    ordered_rotations = [''''''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # NOTE(review): the obfuscated original rebound every value to one name and
    # then read the undefined `entry_msg`, `s`, `result`, `original_string`;
    # restored from those read sites.
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result["bwt_string"]}'"""
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        f"""we get original string '{original_string}'"""
    )
)
| 43 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): all four module constants were assigned to the same dummy name
# `a`; the class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`,
# which grounds these names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class UpperCamelCase__(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LED tokenizer.

    NOTE(review): the obfuscated base class ``lowercase__`` was undefined — the
    otherwise-unused ``PreTrainedTokenizerFast`` import grounds it.  Class
    attributes and methods all shared single dummy names (a ``@mask_token.setter``
    on a property named ``A_`` is an AttributeError, and duplicate ``snake_case``
    parameters are a SyntaxError); names are restored from the ``super()`` calls,
    the setter decorator and the referenced module constants.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer when the serialized `add_prefix_space`
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Plain strings become an AddedToken that also eats the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """LED (like BART) does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad as usual, then extend `global_attention_mask` to the same length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 679 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__(ProcessorMixin):
    """Whisper processor: wraps a feature extractor (audio) and a tokenizer (text).

    NOTE(review): the obfuscated base class ``A`` was undefined — the
    otherwise-unused ``ProcessorMixin`` import grounds it; method names were all
    duplicates and are restored from the tokenizer/feature-extractor calls they
    delegate to.
    """

    # ProcessorMixin resolves the two sub-processors from these class names.
    feature_extractor_class = 'WhisperFeatureExtractor'
    tokenizer_class = 'WhisperTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Route audio to the feature extractor and text to the tokenizer.

        When both are given, the tokenized text is attached to the audio
        features (presumably as ``labels`` — the obfuscated original discarded
        the assignment target; confirm against the real processor).
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Return a unique path (with *suffix*) inside a fresh temporary directory.

    NOTE(review): the obfuscated original was named ``lowercase`` while the test
    below calls ``get_new_path(suffix=...)``, and ``uuid.uuida`` is not a uuid
    API — restored to ``uuid.uuid4``.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """AgentAudio round-trip tests (tensor <-> audio file).

    NOTE(review): both methods were named ``A_`` (the second shadowed the first
    and neither was collected by unittest); names and locals restored from
    their use sites.
    """

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """AgentImage tests (tensor, path and PIL.Image inputs).

    NOTE(review): all three methods were named ``A_``; names restored from
    behavior — the variant asserting ``samefile(...)`` is True was built from a
    path, the one asserting False from a PIL image.
    """

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # A fresh file is written for a PIL image, so it is not the source file.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class UpperCamelCase__(unittest.TestCase):
    """AgentText tests: the wrapper should behave like the wrapped string."""

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        # AgentText presumably compares equal to the underlying str — the
        # obfuscated original asserted equality of the two values directly.
        self.assertEqual(string, agent_type)
| 679 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast StableDiffusionSAGPipeline tests built on tiny dummy components.

    NOTE(review): the obfuscated bases were both ``lowercase`` (undefined) —
    the otherwise-unused mixin imports ground them; the class attributes and
    the ``get_dummy_*`` names follow the pipeline-test-mixin contract and the
    ``super().test_inference_batch_single_identical`` call.
    """

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""),
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""),
            cross_attention_dim=32,
        )
        # clip_sample/set_alpha_to_one presumably False — obfuscation erased
        # the literals; confirm against the original test.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="""scaled_linear""",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """.""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 1.0,
            """sag_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase_(unittest.TestCase):
    """Integration tests for StableDiffusionSAGPipeline on real checkpoints.

    NOTE(review): methods were all named ``__a``; ``tearDown`` is grounded by
    its ``super().tearDown()`` call, and the test names are reconstructed (they
    must be unique and ``test_``-prefixed for unittest to collect them).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = """."""
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="""np""")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = """."""
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="""np""")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = """."""
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="""np""",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF "token dropping" BERT checkpoint into a PyTorch BertForMaskedLM.

    NOTE(review): the obfuscated original was named ``lowercase`` (the
    ``__main__`` block calls ``convert_checkpoint_to_pytorch``) and discarded
    every converted tensor into a rebound local instead of assigning it to the
    model; the assignment targets below are reconstructed from the ``.shape``
    arguments and the checkpoint variable names — confirm against the original
    conversion script.
    """

    def get_masked_lm_array(name):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        # Attention weights are stored per-head; reshape to the torch layout.
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned the parser/namespace to
    # `a` and then used the undefined names `parser`, `args` and the function
    # name `convert_checkpoint_to_pytorch`; restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """
    Helper that builds a tiny Bit configuration plus random dummy inputs and runs
    shape assertions on behalf of the unittest classes below.

    Fix: the class was named ``A_`` while being instantiated as ``BitModelTester``
    elsewhere in this file, and every constructor/method value was bound to a
    throwaway local while later code reads ``self.*`` or named locals.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by test_hidden_states_output.
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the tiny BitConfig shared by every check."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward BitModel and verify the final feature-map shape."""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The backbone downsamples the spatial resolution by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and verify the logits shape."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward BitBackbone with explicit out_features, then with out_features=None."""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps: only the last stage is returned in this case
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for the Bit family.

    Fixes: the original bases were the undefined name ``_a``; every method was
    named ``_lowercase`` so only the last one survived class creation; ``setUp``
    discarded the tester/config-tester instead of storing them on ``self``; and
    ``nn.BatchNormad`` is not a real torch module (``nn.BatchNorm2d``).
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Bit has no common config properties to validate.
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Normalization layers must start as identity (weight=1, bias=0).
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # One hidden state per stage plus the stem output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the slow integration test.

    Fix: the function was named ``lowerCamelCase_`` while being called as
    ``prepare_img()`` below, and it returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of the reference Bit checkpoint against known logits.

    Fix: locals (``model``, ``inputs``, ``expected_shape``, ``expected_slice``)
    were bound to throwaway names while later lines read the real names.
    """

    @cached_property
    def default_image_processor(self):
        # Only available when the vision extras are installed.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Runs the shared backbone test-suite against BitBackbone.

    Fix: the base class was the undefined name ``_a`` (BackboneTesterMixin is
    imported at the top of the file), and ``setUp`` discarded the tester
    instead of storing it on ``self`` for the mixin tests to use.
    """

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def find_backend(line):
    """Return the normalized backend name required by an ``if not is_xxx_available()``
    line, or ``None`` when the line is not a backend guard.

    Fix: the function was defined as ``lowercase`` while being called as
    ``find_backend`` below, and the match list was bound to a throwaway local
    while the following lines read ``backends``.
    """
    if _re_test_backend.search(line) is None:
        return None
    # A guard may test several backends at once; join them in sorted order.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Parse one ``__init__.py`` and return two dicts mapping backend name (or
    "none") to the objects it declares: one for ``_import_structure`` and one
    for the ``TYPE_CHECKING`` section.  Returns ``None`` for a traditional init
    with no ``_import_structure``.

    Fix: the function was defined as ``lowercase`` while being called as
    ``parse_init``, and every local was bound to a throwaway name while later
    lines read ``lines``, ``line_index``, ``objects``, ``backend`` etc.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Compare objects registered in ``_import_structure`` with those imported under
    ``TYPE_CHECKING`` and return a list of human-readable error strings (empty
    when both sides agree).

    Fix: the function was defined as ``lowercase`` while being called as
    ``analyze_results``, and locals were bound to throwaway names while later
    lines read ``errors``, ``duplicate_imports`` etc.
    """

    def find_duplicates(seq):
        # Any object that appears more than once on one side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers tree, validate every ``__init__.py`` and raise
    ``ValueError`` listing every inconsistent file.

    Fix: the function was defined as ``lowercase`` with undefined references
    (``__magic_name__``/throwaway locals) where ``PATH_TO_TRANSFORMERS``,
    ``fname``, ``objects``, ``errors`` and ``failures`` are needed.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules (dotted paths relative to
    ``PATH_TO_TRANSFORMERS``), skipping private folders and empty leftovers.

    Fix: defined as ``lowercase`` while called as ``get_transformers_submodules``;
    locals restored from their read sites.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; nested ones are covered above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately absent from the main init.
# Fix: this list was bound to the throwaway name `a`, while check_submodules()
# below reads IGNORE_SUBMODULES (NameError).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Load the real transformers init and raise if any submodule on disk is
    missing from ``_import_structure`` (unless explicitly ignored).

    Fix: defined as ``lowercase`` while called as ``check_submodules``;
    ``spec``, ``transformers``, ``module_not_registered`` and
    ``list_of_modules`` restored from their read sites.
    """
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Run both repository consistency checks when executed as a script.
    check_all_inits()
    check_submodules()
| 679 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
SCREAMING_SNAKE_CASE__ = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """
    Fast CPU tests for the Attend-and-Excite pipeline using tiny components.

    Fixes: bases were the undefined name ``__lowerCamelCase``; every method was
    named ``__lowerCAmelCase`` so only the last survived; ``get_dummy_inputs``
    declared the same parameter name twice (SyntaxError); boolean keyword values
    were replaced by a module global. Restored values are marked where inferred.
    """

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Attend-and-Excite is sensitive to non-determinism.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build the tiny unet/vae/text-encoder set the pipeline needs."""
        torch.manual_seed(0)
        # NOTE(review): `UNetaDConditionModel` is the name this file imports for
        # the 2D conditional UNet; kept for consistency with the file's imports.
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4),
            layers_per_block=1,
            sample_size=3_2,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=3_2,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=1_2_8,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=3_2,
            intermediate_size=3_7,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_0_0_0,
            hidden_act="gelu",
            projection_dim=5_1_2,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for the pipeline on `device`."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 6_4, 6_4, 3))
        expected_slice = np.array(
            [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    """
    Slow GPU integration test against the reference SD 1.4 checkpoint.

    Fixes: every method was named ``__lowerCAmelCase`` (shadowed); locals were
    bound to throwaway names; ``torch.floataa`` does not exist (``torch.float16``).
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(5_1)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 47 |
'''simple docstring'''
import os
def solution():
    """Return the maximum top-to-bottom path sum through the triangle stored in
    ``triangle.txt`` next to this script (Project Euler style, bottom-up DP on
    each row accumulating the best parent).

    Fix: the function was named ``lowercase`` while ``__main__`` calls
    ``solution()``; ``__file__`` and the locals (``triangle``,
    ``numbers_from_line``, ``number1``/``number2``) were replaced by
    throwaway/undefined names.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle = os.path.join(script_dir, "triangle.txt")

    with open(triangle) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # Best reachable parent directly above (0 outside the triangle)...
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            # ...and above-left.
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
    # Print the maximum top-to-bottom path sum for the bundled triangle file.
    print(solution())
| 679 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Best-effort detection of a Google Colab environment; defaults to False when
# the probe itself cannot be imported.
# Fix: both the flag and the probe result were bound to a throwaway name while
# the menu's run() loop below reads `in_colab` (NameError).
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class A:
    """
    A CLI bullet menu: renders a list of choices and lets the user pick one with
    the arrow keys, the digit keys, or Enter.

    Fixes: ``__init__`` declared the same parameter name twice (SyntaxError);
    every handler was named ``__SCREAMING_SNAKE_CASE`` so only the last survived
    class creation; attributes were discarded into throwaway locals while the
    methods read ``self.position`` / ``self.choices`` / ``self.prompt`` /
    ``self.arrow_char``.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        # NOTE(review): mutable default kept for compatibility; `choices` is
        # only read, never mutated, by this class.
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        """Write the choice at `index`, highlighted in green where supported."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one row, prefixed with the arrow when it is the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the selection up or down by `num_spaces`, clamped to the menu bounds."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed (when in range)."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu and block until a choice index is selected."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # In Colab there is no raw key handling: fall back to input().
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu before echoing the final choice.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 48 |
'''Project Euler problem 25: index of the first Fibonacci term with n digits.'''


def fibonacci(n):
    """Return the n-th Fibonacci number (0, 1, 1, 2, 3, 5, ...).

    Preserves the original contract: returns 0 for ``n == 1`` and for any
    non-``int`` input, and 1 for ``n == 2``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively up to index n.
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with at least ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 679 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Architecture lookup tables keyed by RWKV checkpoint size label
# (referenced below when building the RwkvConfig).
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# NOTE: "HIDEN" (sic) is kept to match the name used by the conversion code below.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename raw RWKV checkpoint keys in place to the Hugging Face naming scheme.

    Every key except ``head.weight`` is additionally prefixed with ``rwkv.``.
    Returns the same dict object with the renamed keys.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download a raw RWKV checkpoint from the Hub and save it in HF format.

    Args:
        repo_id: Hub repo to pull the raw checkpoint from.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: where the converted tokenizer/config/weights are written.
        size: model size label (e.g. "7B"); inferred from the file name if None.
        tokenizer_file: optional tokenizer.json; defaults to the GPT-NeoX tokenizer.
        push_to_hub: if True, upload the converted model under ``model_name``.
        model_name: Hub id to push to (required when ``push_to_hub`` is set).

    Raises:
        ValueError: if the size cannot be inferred / is unknown, or if
            ``push_to_hub`` is requested without ``model_name``.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    # Free the full state dict before reloading shards one by one.
    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
a : Dict = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 679 | 0 |
'''Memoized recursive factorial.'''
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return ``num!`` for a non-negative integer.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    # Recursion is cheap here because lru_cache memoizes every intermediate value.
    return 1 if num in (0, 1) else num * factorial(num - 1)


# Backward-compatible alias for the previous public name.
A__ = factorial

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 50 |
'''Bitwise AND of two non-negative integers, returned as a binary string.'''


def lowercase(a: int, b: int) -> str:
    """Return the binary AND of ``a`` and ``b`` as a "0b"-prefixed string.

    The result is zero-padded to the width of the longer operand.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # AND the operands digit by digit after padding to a common width.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679 | 0 |
'''Lazy import structure for MobileNetV2 (config, image processing, PyTorch model).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Base structure; optional entries are appended below when their backend is installed.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Mirror of the lazy structure so static type checkers see the real symbols.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors` in the tests below
# (the test bodies branch on FRAMEWORK != "jax").
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Test-suite for the byte-level `PerceiverTokenizer`, driven by the shared
    tokenizer test harness (the mixin base is obfuscated here as `lowercase__`).

    NOTE(review): this file appears mechanically renamed and is currently broken:
    every method is named `A_` (later defs shadow earlier ones, and unittest will
    not discover them as tests), several signatures repeat the parameter name
    `snake_case` (a SyntaxError), and many statements assign to a throwaway
    `UpperCAmelCase` while later lines read the original local names
    (`tokenizer`, `toks`, `encoded`, ...). Restore from the upstream Perceiver
    tokenizer test module before relying on this class.
    """
    # Tokenizer class under test / whether a Rust (fast) tokenizer exists for it.
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def A_ ( self ):
        '''Set up: build a fresh PerceiverTokenizer and save it into the temp dir.'''
        super().setUp()
        # NOTE(review): the result is bound to `UpperCAmelCase`, yet the next line
        # reads `tokenizer` — broken by the renaming.
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A_ ( self ):
        '''Reference tokenizer loaded from the deepmind/language-perceiver checkpoint.'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def A_ ( self , **snake_case ):
        '''Build a tokenizer from the directory written in setUp.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        '''Produce a (text, ids) pair of cleanly decodable ASCII tokens for round-trip
        tests (upstream: get_clean_sequence(tokenizer, with_prefix_space, max_length,
        min_length)).'''
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only tokens made of letters/spaces that survive an encode round-trip.
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids
    def A_ ( self ):
        '''Encode/decode round-trip with multibyte (UTF-8) input, checking exact ids
        and the "[CLS]...[SEP]" decoded form.'''
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def A_ ( self ):
        '''Batch encoding: expected ids and (batch, length) tensor shapes.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def A_ ( self ):
        '''Encoder-only batches must expose input_ids/attention_mask and no decoder keys.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )
    def A_ ( self ):
        '''Target-text encoding honors max_length padding/truncation.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )
    def A_ ( self ):
        '''Save/reload round-trips preserve encodings, added special tokens, and
        model_max_length overrides.'''
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )
    def A_ ( self ):
        '''additional_special_tokens read from special_tokens_map.json /
        tokenizer_config.json survive from_pretrained and can be overridden.'''
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def A_ ( self ):
        '''Decoding an out-of-alphabet byte id yields the replacement character.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
    def A_ ( self ):
        '''Intentionally skipped mixin test (not applicable to a byte-level tokenizer).'''
        pass
    def A_ ( self ):
        '''Intentionally skipped mixin test (not applicable to a byte-level tokenizer).'''
        pass
    def A_ ( self ):
        '''Intentionally skipped mixin test (not applicable to a byte-level tokenizer).'''
        pass
    def A_ ( self ):
        '''Intentionally skipped mixin test (not applicable to a byte-level tokenizer).'''
        pass
    def A_ ( self ):
        '''convert_tokens_to_string joins special and ordinary tokens back into a str.'''
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

# File names and download locations consumed by the fast MVP tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_024,
}
class __lowercase ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) MVP tokenizer, derived from the GPT-2 byte-level BPE
    tokenizer. Mirrors `MvpTokenizer` and adds `add_prefix_space` /
    `trim_offsets` handling on the backend pre-tokenizer and post-processor.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words and include the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Add <s>/</s> around one sequence, or <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Return an all-zero token-type-id mask (MVP does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from checkpoint id to its hosted config file.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for an EfficientFormer model.

    Defaults correspond to the EfficientFormer-L1 architecture. Any extra
    keyword arguments are forwarded to `PretrainedConfig`.

    NOTE: the list defaults below are mutable and shared across calls; they are
    kept as-is for compatibility with the original configuration contract.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Store every architecture hyper-parameter on the config instance.
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: row i has i+1 stars, right-aligned with spaces."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: rows of shrinking stars followed by padding spaces."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond for a positive n; otherwise print a friendly message."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')

    # Loop until the user enters 0 at the continue prompt.
    # (The original code assigned this flag to a mangled name but tested `K`,
    # which raised NameError at runtime.)
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
| 53 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs and random inputs for the TF model tests below.

    NOTE(review): the class was garbled to ``UpperCamelCase__`` while the test class
    instantiates ``TFResNetModelTester`` — names restored from the call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ResNet model tests. ResNet does not use input_ids/inputs_embeds/attention_mask,
    so several common tests are overridden or skipped.

    NOTE(review): the base classes were garbled to the undefined name ``lowercase__``;
    restored to the mixins imported at the top of the file. All methods had been
    renamed to ``A_`` (shadowing each other) and all class attributes to one shared
    name — descriptive names restored.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet has no attention, no token pruning, no resizable embeddings and no head masking.
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    NOTE(review): the function was garbled to ``lowercase`` while the integration
    test calls ``prepare_img()`` — name restored from the call site.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real logits of the pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # NOTE(review): this property was garbled to ``A_`` while the test below
        # reads ``self.default_image_processor`` — name restored from the call site.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 679 | 0 |
class Graph:
    """Undirected weighted graph stored as a nested adjacency dict, with a
    union-find helper and Borůvka's minimum-spanning-tree algorithm.

    NOTE(review): the source defined two top-level classes both named ``A`` (the
    second shadowing the first) while the code references ``Graph``,
    ``Graph.UnionFind`` and ``Graph.build`` — the original nested structure was
    restored from those call sites.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] == weight of edge (u, v); symmetric for both endpoints.
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add `vertex` if not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge (head, tail) with `weight`; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate edge weights so every edge weight is distinct
        (Borůvka requires distinct weights)."""
        edges = self.get_edges()
        # Drop the reversed duplicate of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'{head} -> {tail} == {weight}\n'
        return string.rstrip("\n")

    def get_edges(self):
        """Return every directed (tail, head, weight) triple (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a Graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for `item` (no-op if it already exists)."""
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of `item`'s set, compressing the path."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets of item1 and item2; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` (assumes distinct edge weights)."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Drop the reversed duplicate of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 54 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds small MPNet configs and random inputs for the model tests below.

    NOTE(review): class/method/parameter names were garbled (duplicate
    ``snake_case`` parameters are a SyntaxError); restored from the attribute
    assignments and the call sites in the test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """MPNet model tests.

    NOTE(review): base classes were garbled to the undefined ``lowercase__`` and all
    methods/attributes shared single mangled names; restored to the mixins imported
    at the top of the file and descriptive names.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real hidden states of the pretrained MPNet checkpoint."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# Template URL of community pipelines on the diffusers GitHub repository.
# NOTE(review): both module constants were garbled to ``SCREAMING_SNAKE_CASE`` while
# the functions below reference ``COMMUNITY_PIPELINES_URL`` and ``logger`` — names
# restored from those call sites.
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return the list of diffusers versions released on PyPI, sorted oldest first.

    NOTE(review): garbled to ``UpperCAmelCase``; restored from the call site in
    ``get_cached_module_file``.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the HF modules cache directory (with ``__init__.py``) and add it to ``sys.path``.

    Idempotent: returns immediately if the cache is already on the path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is on the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create a package named `name` inside the HF modules cache, creating parents recursively."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the deduplicated list of modules imported relatively in `module_file`.

    Matches both ``import .xxx`` and ``from .xxx import yyy`` forms.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Return all files (``*.py`` paths) transitively imported relatively by `module_file`."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'''{f}.py''' for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that every top-level (non-relative) import in `filename` is installed.

    Raises ImportError listing missing packages; otherwise returns the file's
    relative imports (see ``get_relative_imports``).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            F'''{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`''' )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` and return the attribute `class_name`.

    When `class_name` is None, auto-detect the single pipeline class defined
    in the module via ``find_pipeline_class``.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)

    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique class in `loaded_module` that inherits from ``DiffusionPipeline``
    and is defined outside the ``diffusers`` package.

    Raises ValueError if more than one such class is found; returns None if none is.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Locate `module_file` (locally, on the community-pipelines GitHub, or on the Hub),
    copy it — plus its relative imports — into the dynamic-modules cache, and return the
    path of the cached copy relative to the cache root.

    NOTE(review): all parameters were garbled to the duplicate name ``a_`` (a
    SyntaxError); names restored from the keyword arguments used at the call sites.
    """
    module_file_or_url = os.path.join(str(pretrained_model_name_or_path), module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif str(pretrained_model_name_or_path).count("/") == 0:
        # A bare name (no "/"): treat as a community pipeline hosted on GitHub.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(F'''Defaulting to latest_version: {revision}.''')
        elif revision in available_versions:
            revision = F'''v{revision}'''
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {', '.join(available_versions + ['main'] )}.''' )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # public GitHub raw file, no token needed
            )
            submodule = "git"
            module_file = str(pretrained_model_name_or_path) + ".py"
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(str(pretrained_model_name_or_path).split("/")))
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F'''{module_needed}.py'''
            shutil.copy(os.path.join(str(pretrained_model_name_or_path), module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    F'''{module_needed}.py''',
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Download/cache `module_file` from `pretrained_model_name_or_path` and return the
    class `class_name` defined in it (auto-detected when None)."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 55 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# NOTE(review): both module constants were garbled to the single name ``a`` (the
# second assignment shadowing the first) while the converter function below uses
# ``logger`` and ``TOKENIZER_CLASSES`` — names restored from those call sites.
logger = logging.get_logger(__name__)

# Map every slow->fast convertible tokenizer name to its fast tokenizer class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowercase ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """Convert slow tokenizer checkpoints into fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of a tokenizer in ``TOKENIZER_CLASSES``; ``None``
            converts every known tokenizer.
        checkpoint_name: specific checkpoint to convert; ``None`` converts all
            canonical checkpoints of each tokenizer class.
        dump_path: output directory for the generated files.
        force_download: re-download checkpoints even if cached.

    NOTE(review): the original definition repeated ``__magic_name__`` for all
    four parameters (a SyntaxError) while the body read the real names;
    this body is reconstructed from the upstream conversion script.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}

    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        # Prefix saved files with the checkpoint name unless overridden below.
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]

        # Fixed: `tokenizer_class` is already a class, so the original
        # `tokenizer_class.__class__.__name__` always printed "type".
        logger.info(F"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}" )

        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__name__} {checkpoint}" )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )

            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )

            # legacy_format=False writes the single-file fast `tokenizer.json`.
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )

            # Keep only the fast tokenizer artifact.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    # Fixed: the original bound the parser to a throwaway name `a` while
    # calling methods on an undefined `parser` and reading an undefined `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    # `lowercase` is this file's conversion entry point (upstream name:
    # convert_slow_checkpoint_to_fast).
    lowercase(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# The tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`;
# the original bound every value to one throwaway name `_a`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1_024,
    "moussaKam/barthez": 1_024,
    "moussaKam/barthez-orangesum-title": 1_024,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

# Preserve the original module-level binding (its final value).
_a = SPIECE_UNDERLINE
class _lowercase ( PreTrainedTokenizer ):
    """BARThez tokenizer backed by a SentencePiece BPE model.

    NOTE(review): in the incoming file the base class was an undefined
    ``__lowercase``, every method was named ``a`` (so later defs shadowed
    earlier ones) and every def repeated its parameter name (a SyntaxError).
    Names below are restored to the standard ``PreTrainedTokenizer``
    interface expected by the library machinery.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Fairseq-compatible special-token ids; <mask> sits at the end of the
        # SentencePiece vocabulary.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token type ids; return an all-zero mask of full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # PieceToId returns 0 for unknown pieces; map those to unk_token_id.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Decode sub-tokens back into a string, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 56 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( lowercase__ ):
    """Agent tool answering a natural-language question about an image (VQA).

    Wraps the ViLT VQA checkpoint behind the encode/forward/decode protocol:
    `A_` (encode) builds processor tensors, the no-grad `A_` (forward) runs
    the model, and the final `A_` (decode) maps the argmax logit to a label.

    NOTE(review): base class `lowercase__` is undefined in this file —
    presumably PipelineTool (imported above); confirm. All class attributes
    and methods share one scrambled name, so earlier bindings are shadowed.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = "dandelin/vilt-b32-finetuned-vqa"
    SCREAMING_SNAKE_CASE__ : Dict = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    SCREAMING_SNAKE_CASE__ : List[str] = "image_qa"
    SCREAMING_SNAKE_CASE__ : int = AutoProcessor
    SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForVisualQuestionAnswering
    SCREAMING_SNAKE_CASE__ : Any = ["image", "text"]
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["text"]

    def __init__( self , *snake_case , **snake_case ):
        '''Require the vision backend (PIL) before normal tool initialization.'''
        requires_backends(self , ["vision"] )
        super().__init__(*snake_case , **snake_case )

    def A_ ( self , snake_case , snake_case ):
        '''Preprocess (image, question) into model-ready PyTorch tensors.'''
        return self.pre_processor(snake_case , snake_case , return_tensors="pt" )

    def A_ ( self , snake_case ):
        '''Run the VQA model without tracking gradients; return raw logits.'''
        with torch.no_grad():
            return self.model(**snake_case ).logits

    def A_ ( self , snake_case ):
        '''Map the highest-scoring logit index to its answer string.'''
        # NOTE(review): `idalabel` is presumably the model config's id2label
        # mapping under this file's scrambled naming — confirm upstream.
        UpperCAmelCase : Any = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it exposes.
# Fixed: the original bound every list to one name `A_` (overwriting earlier
# entries) and then passed an undefined `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Install a lazy module so heavy backends are imported only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
# Module-level logger (note: bound to a throwaway name `a`; nothing below reads it).
a : Optional[int] = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
    """Replace every ``name.N`` segment of a PyTorch key with ``name_N``.

    Fixed: the original compiled the ``\\w+[.]\\d+`` pattern into an unused
    local and called ``re.findall(key, key)`` — using the key as its own
    regex pattern, so the intended renaming never happened.
    """
    key = __magic_name__
    pats = re.findall(r"\w+[.]\d+" , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def lowercase ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename one PyTorch weight key to its Flax equivalent and reshape the tensor.

    Returns ``(flax_tuple_key, tensor)``. Branch order matters: layer-norm
    scale, embedding, 4-D conv kernel, linear kernel, then legacy gamma/beta.

    NOTE(review): the original def repeated ``__magic_name__`` for all three
    parameters (a SyntaxError); names are restored from the upstream helper.
    """
    # layer norm: some frameworks store the scale under a "bias"-suffixed key
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: NCHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose to Flax kernel layout
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def lowercase ( pt_state_dict , flax_model , init_key=42 ):
    """Convert a PyTorch state dict into a nested Flax parameter tree.

    NOTE(review): the original def repeated ``__magic_name__`` for every
    parameter (a SyntaxError); names are reconstructed. The helper names
    ``rename_key`` / ``rename_key_and_reshape_tensor`` are kept exactly as
    the original call sites had them — confirm they resolve in the real module.
    """
    # Step 1: convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )

    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )

        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
| 679 | 0 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
# Raise transformers logging verbosity so conversion progress is printed.
logging.set_verbosity_info()
def __lowerCAmelCase ( params , i , prefix ):
    """Return the relative-position bias embedding for layer *i* of *prefix*.

    Fixed: the original repeated ``__UpperCamelCase`` for every parameter
    (a SyntaxError) while the body read ``params``/``prefix``/``i``.
    """
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def __lowerCAmelCase ( params , i , prefix , layer_name="attention" ):
    """Return the (k, o, q, v) projection matrices for layer *i* of *prefix*.

    Head and per-head dimensions are fused into single 2-D matrices. Fixed:
    the original repeated ``__UpperCamelCase`` for every parameter (SyntaxError).
    """
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    # The output projection fuses (heads, head_dim) on its *input* side instead.
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def __lowerCAmelCase ( params , i , prefix , split_mlp_wi=False ):
    """Return the (wi, wo) MLP kernels for layer *i*; ``wi`` is a pair when gated.

    Fixed: the original repeated ``__UpperCamelCase`` for every parameter
    (a SyntaxError) while the body read ``params``/``prefix``/``i``.
    """
    if split_mlp_wi:
        # v1.1-style gated GeLU stores two input projections.
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]

    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def __lowerCAmelCase ( params , i , prefix , layer_name ):
    """Return the layer-norm scale vector for layer *i* of the given block.

    Fixed: the original repeated ``__UpperCamelCase`` for every parameter
    (a SyntaxError) while the body read ``params``/``prefix``/``layer_name``/``i``.
    """
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def __lowerCAmelCase ( __UpperCamelCase : dict , *, __UpperCamelCase : int , __UpperCamelCase : bool , __UpperCamelCase : bool = False ):
    """Convert a flattened T5X variable tree into a PyTorch-style weight dict.

    NOTE(review): this block is badly scrambled and cannot run as written:
    the signature repeats `__UpperCamelCase` (a SyntaxError), every assignment
    targets the throwaway `snake_case_` while later lines read the intended
    locals (`old`, `new`, `layer_norm`, `k/o/q/v`, `wi`, `wo`, flags), and the
    helper names it calls (tax_*_lookup) are not defined under those names in
    this file. Presumably reconstructed from the upstream umt5 converter,
    where each assignment fills keys of an OrderedDict `new` — confirm there.
    """
    snake_case_ : str = traverse_util.flatten_dict(variables["""target"""] )
    snake_case_ : str = {"""/""".join(__UpperCamelCase ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    snake_case_ : Union[str, Any] = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , __UpperCamelCase )

    snake_case_ : List[str] = collections.OrderedDict()

    # Shared embeddings.
    snake_case_ : int = old["""token_embedder/embedding"""]

    # Encoder.
    for i in range(__UpperCamelCase ):
        # Block i, layer 0 (Self Attention).
        snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_attention_layer_norm""" )
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """attention""" )
        snake_case_ : List[str] = layer_norm
        snake_case_ : int = k.T
        snake_case_ : List[Any] = o.T
        snake_case_ : str = q.T
        snake_case_ : Union[str, Any] = v.T

        # Block i, layer 1 (MLP).
        snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
        snake_case_ , snake_case_ : int = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , __UpperCamelCase )
        snake_case_ : Dict = layer_norm
        if split_mlp_wi:
            snake_case_ : str = wi[0].T
            snake_case_ : int = wi[1].T
        else:
            snake_case_ : List[str] = wi.T
        snake_case_ : str = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            snake_case_ : Optional[int] = tax_relpos_bias_lookup(
                __UpperCamelCase , __UpperCamelCase , """encoder""" ).T

    snake_case_ : Dict = old["""encoder/encoder_norm/scale"""]

    if not scalable_attention:
        # Non-scalable models share one relative-attention bias per stack.
        snake_case_ : Optional[int] = tax_relpos_bias_lookup(
            __UpperCamelCase , 0 , """encoder""" ).T
        snake_case_ : str = tax_relpos_bias_lookup(
            __UpperCamelCase , 0 , """decoder""" ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(__UpperCamelCase ):
            # Block i, layer 0 (Self Attention).
            snake_case_ : int = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
            snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """self_attention""" )
            snake_case_ : Union[str, Any] = layer_norm
            snake_case_ : Tuple = k.T
            snake_case_ : Optional[Any] = o.T
            snake_case_ : Any = q.T
            snake_case_ : int = v.T

            # Block i, layer 1 (Cross Attention).
            snake_case_ : Union[str, Any] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
            snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """encoder_decoder_attention""" )
            snake_case_ : Dict = layer_norm
            snake_case_ : Tuple = k.T
            snake_case_ : int = o.T
            snake_case_ : List[Any] = q.T
            snake_case_ : Optional[Any] = v.T

            # Block i, layer 2 (MLP).
            snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
            snake_case_ , snake_case_ : List[str] = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , __UpperCamelCase )
            snake_case_ : List[str] = layer_norm
            if split_mlp_wi:
                snake_case_ : int = wi[0].T
                snake_case_ : List[Any] = wi[1].T
            else:
                snake_case_ : str = wi.T
            snake_case_ : Optional[Any] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                snake_case_ : Optional[Any] = tax_relpos_bias_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" ).T

        snake_case_ : List[str] = old["""decoder/decoder_norm/scale"""]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            snake_case_ : Optional[Any] = old["""decoder/logits_dense/kernel"""].T

    return new
def __lowerCAmelCase ( converted_params , is_encoder_only ):
    """Build a PyTorch state dict from converted numpy params, filling shared weights.

    Fixed: the original repeated ``__UpperCamelCase`` for both parameters
    (a SyntaxError) and bound every tensor to the throwaway ``snake_case_``;
    target keys are restored from the upstream ``make_state_dict``.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def __lowerCAmelCase ( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Load a T5X checkpoint from disk into the given PyTorch *model*.

    Fixed: the original repeated ``__UpperCamelCase`` for every parameter
    (a SyntaxError). The helper names ``convert_tax_to_pytorch`` and
    ``make_state_dict`` are kept exactly as the original call sites had them
    — confirm they resolve in the real module. ``strict=True`` follows the
    upstream loader (TODO confirm against the original).
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def __lowerCAmelCase ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only=False , scalable_attention=False ):
    """Convert a T5X checkpoint into a saved PyTorch model directory.

    Fixed: the original repeated ``__UpperCamelCase`` for every parameter
    (a SyntaxError). The loader name ``load_tax_weights_in_ta`` is kept
    exactly as the original call site had it — confirm it resolves in the
    real module.
    """
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
    # Fixed: the original bound the parser to a throwaway `__lowerCAmelCase`
    # while calling methods on an undefined `parser`, read an undefined `args`,
    # and accessed `args.tax_checkpoint_path` although the flag is
    # `--t5x_checkpoint_path` (argparse dest: `t5x_checkpoint_path`).
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    parser.add_argument(
        '''--scalable_attention''',
        action='''store_true''',
        help='''Whether the model uses scaled attention (umt5 model)''',
        default=False,
    )
    args = parser.parse_args()

    # NOTE(review): `convert_tax_checkpoint_to_pytorch` is the conversion
    # entry point's upstream name — confirm it resolves in the real module.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 58 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase__ ):
    """Unit tests for EulerDiscreteScheduler, run on the SchedulerCommonTest harness.

    NOTE(review): base class `lowercase__` is undefined in this file —
    presumably SchedulerCommonTest (imported above); confirm. Both class
    attributes below share one scrambled name, so the first binding is
    overwritten — likely `scheduler_classes` and `num_inference_steps`.
    """

    SCREAMING_SNAKE_CASE__ : Dict = (EulerDiscreteScheduler,)
    SCREAMING_SNAKE_CASE__ : List[Any] = 10
def A_ ( self , **snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**snake_case )
return config
def A_ ( self ):
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case )
def A_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def A_ ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case )
def A_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
    def A_ ( self ):
        """Full-loop test with the default (epsilon-prediction) config; pins
        the output sum/mean against known-good values.

        NOTE(review): local names are scrambled — everything is assigned to
        `UpperCAmelCase` while later lines read `scheduler_class`, `config`,
        `sample`, `model`, `output`, `result_sum`, `result_mean`; restore the
        original locals before running.
        """
        UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
        UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Union[str, Any] = self.dummy_model()
        UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : Any = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : Tuple = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : List[Any] = model(snake_case , snake_case )
            UpperCAmelCase : str = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Dict = output.prev_sample
        UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case ) )
        # Regression pins for the deterministic full loop (seed 0).
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def A_ ( self ):
        """Full-loop test with prediction_type="v_prediction"; pins output stats.

        NOTE(review): local names are scrambled (all assigned to `UpperCAmelCase`
        while reads use the intended locals); restore before running.
        """
        UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
        UpperCAmelCase : List[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = self.dummy_model()
        UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : int = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : Dict = model(snake_case , snake_case )
            UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Any = output.prev_sample
        UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        # Regression pins for v-prediction (seed 0).
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def A_ ( self ):
    """Run the scheduler end to end with timesteps created directly on the
    target device (``set_timesteps(..., device=...)``) and check that the
    summary statistics match the CPU reference values.

    NOTE(review): every assignment target was replaced by the placeholder
    ``UpperCAmelCase`` and every call argument by ``snake_case`` — the
    original variable bindings are destroyed and this method cannot run as
    written. The comments below describe the apparent intent (the standard
    diffusers scheduler test); confirm against the unobfuscated source.
    """
    UpperCAmelCase : Optional[int] = self.scheduler_classes[0]      # scheduler class under test
    UpperCAmelCase : Optional[int] = self.get_scheduler_config()    # default (epsilon-prediction) config
    UpperCAmelCase : Any = scheduler_class(**snake_case )
    # Timesteps are placed on the target device here, unlike the CPU variant.
    scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
    UpperCAmelCase : List[Any] = torch.manual_seed(0 )              # fixed seed -> deterministic stats
    UpperCAmelCase : int = self.dummy_model()
    # init_noise_sigma lives on the device; moved to CPU before scaling the sample.
    UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    UpperCAmelCase : str = sample.to(snake_case )
    for t in scheduler.timesteps:
        # scale input -> predict residual -> step the scheduler
        UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case , snake_case )
        UpperCAmelCase : List[Any] = model(snake_case , snake_case )
        UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
        UpperCAmelCase : Dict = output.prev_sample
    UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
    UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
    # Same regression values as the CPU run of this scheduler configuration.
    assert abs(result_sum.item() - 10.0807 ) < 1e-2
    assert abs(result_mean.item() - 0.0131 ) < 1e-3
def A_ ( self ):
    """Run the scheduler end to end with Karras sigmas enabled
    (``use_karras_sigmas=True``, presumably — the flag value is obfuscated)
    and check summary statistics of the final sample.

    NOTE(review): assignment targets (``UpperCAmelCase``) and arguments
    (``snake_case``) were clobbered by the obfuscator, so the real variable
    bindings are destroyed; the comments describe the apparent intent only.
    """
    UpperCAmelCase : Dict = self.scheduler_classes[0]
    UpperCAmelCase : Tuple = self.get_scheduler_config()
    # use_karras_sigmas is the distinguishing feature of this test variant.
    UpperCAmelCase : Dict = scheduler_class(**snake_case , use_karras_sigmas=snake_case )
    scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
    UpperCAmelCase : List[str] = torch.manual_seed(0 )          # fixed seed
    UpperCAmelCase : Any = self.dummy_model()
    UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    UpperCAmelCase : List[str] = sample.to(snake_case )
    for t in scheduler.timesteps:
        UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
        UpperCAmelCase : Dict = model(snake_case , snake_case )
        UpperCAmelCase : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
        UpperCAmelCase : List[str] = output.prev_sample
    UpperCAmelCase : int = torch.sum(torch.abs(snake_case ) )
    UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
    # Karras-sigma schedule produces different regression values than the default.
    assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
    assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 679 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# NOTE: the obfuscated original bound all four of these module constants to the
# single name `__A`, each assignment clobbering the previous one, while the
# tokenizer class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
# The canonical names are restored here.
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory / hub repository.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length (positional embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    byte-level Byte-Pair-Encoding tokenizer.

    NOTE(review): the obfuscated original bound every method to the single name
    ``SCREAMING_SNAKE_CASE_`` (later defs clobbered earlier ones) and the
    ``@mask_token.setter`` decorator raised ``NameError`` at class-creation time;
    the base class name was undefined as well. The canonical member names from
    ``transformers``' ``BartTokenizerFast`` are restored below — these are the
    names the base class and the framework actually invoke.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the byte-level pre-tokenizer if its add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`;
        # the post-processor may still need its flags synchronised.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token; logs an error and returns ``None`` when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the
        # space before it — hence lstrip=True (assumed from upstream; confirm).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` (single sequence) or ``<s> A </s></s> B </s>`` (pair)."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids — return an all-zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 59 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__(TestCase):
    """Static lint checks over the dataset scripts under ``./datasets``.

    NOTE(review): the obfuscated original named all four methods ``A_`` (so the
    test methods called undefined helpers) and used an undefined base class.
    Names are restored from the visible ``self._no_encoding_on_file_open`` /
    ``self._no_print_statements`` call sites and the ``TestCase`` import.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a match for any ``open()`` call lacking an explicit mode/encoding, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return a match for a real ``print(`` call (ignoring comments/docstrings), else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            all_matches = regexp.finditer(input_text)
            matches = [match for match in all_matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 679 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for ``CycleDiffusionPipeline``.

    NOTE(review): the obfuscated original bound all class attributes to one
    name, called every method ``lowerCamelCase`` (so the internal
    ``get_dummy_components``/``get_dummy_inputs`` calls and the mixin hooks
    could never resolve), and destroyed assignment targets. Names below are
    restored from the visible call sites, the ``super().test_*`` calls, and
    the imported mixin bases.
    """

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a set of tiny pipeline components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs (image + generator) for the pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map [-1, 1] noise into [0, 1] image range
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __lowerCAmelCase(unittest.TestCase):
    """Slow, GPU-only integration tests for ``CycleDiffusionPipeline``.

    NOTE(review): the obfuscated original named all three methods
    ``lowerCamelCase`` (so ``tearDown`` could never be invoked by unittest)
    and destroyed local bindings; names are restored from the visible
    ``super().tearDown()`` call and the upstream test conventions.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 60 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class UpperCamelCase__(BertEncoder):
    """BertEncoder variant that can run a single layer at a time for PABEE early exit.

    NOTE(review): the obfuscated original named the method ``A_`` with duplicated
    ``snake_case`` parameters (a SyntaxError) and an undefined base class; the
    name ``adaptive_forward`` and its keyword parameters are restored from the
    call site in this module (instantiated as ``BertEncoderWithPabee``), and the
    base from the ``BertEncoder`` import.
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only ``self.layer[current_layer]`` and return its hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BERT backbone with PABEE (Patience-based Early Exit) inference.

    NOTE(review): this block is obfuscated — every assignment target is the
    placeholder ``UpperCAmelCase``, most arguments are the placeholder
    ``snake_case``, and the ``def`` below repeats a parameter name (a
    SyntaxError) — so it cannot run as written. The comments annotate the
    apparent intent (the PABEE reference implementation); restore the real
    bindings before relying on them.
    """

    def __init__( self , snake_case ):
        # snake_case: model config. Builds the PABEE encoder and zeroes the
        # early-exit bookkeeping (intended targets noted per line).
        super().__init__(snake_case )
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )   # intended: self.encoder
        self.init_weights()
        UpperCAmelCase : int = 0            # intended: self.patience
        UpperCAmelCase : Dict = 0           # intended: self.inference_instances_num
        UpperCAmelCase : Optional[int] = 0  # intended: self.inference_layers_num
        UpperCAmelCase : List[Any] = 0      # intended: self.regression_threshold

    def A_ ( self , snake_case ):
        """Intended: ``set_regression_threshold`` — early-exit threshold for regression tasks."""
        UpperCAmelCase : List[Any] = threshold   # intended: self.regression_threshold = threshold

    def A_ ( self , snake_case ):
        """Intended: ``set_patience`` — number of consecutive agreeing layers before exiting."""
        UpperCAmelCase : str = patience          # intended: self.patience = patience

    def A_ ( self ):
        """Intended: ``reset_stats`` — clear the inference statistics counters."""
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self ):
        """Intended: ``log_stats`` — print average exit layer and resulting speed-up."""
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        """Intended: ``forward`` — PABEE forward pass.

        Training: run every layer, collect per-layer classifier logits.
        ``patience == 0``: run the full encoder (no early exit).
        Otherwise: run layer by layer and stop once ``patience`` consecutive
        layers agree on the prediction (or differ by less than the regression
        threshold).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()        # intended: input_shape
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )

        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )

        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output

        if self.training:
            # Training: every layer contributes logits so all exits get supervised.
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )

                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0:  # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            # PABEE inference: stop once `patience` consecutive layers agree.
            UpperCAmelCase : int = 0            # intended: patient_counter
            UpperCAmelCase : Optional[Any] = None   # intended: patient_result
            UpperCAmelCase : Optional[Any] = 0      # intended: calculated_layer_num
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )

                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    # Regression: "agreement" means predictions within the threshold.
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    # Classification: "agreement" means identical argmax labels.
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0

                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            # Bookkeeping for log_stats(): layers actually run vs. instances seen.
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , BERT_START_DOCSTRING , )
class UpperCamelCase__(BertPreTrainedModel):
    """PABEE sequence classification/regression head over ``BertModelWithPabee``.

    One linear classifier per transformer layer, so any layer can serve as an
    early exit; at train time every layer's logits contribute a depth-weighted
    loss.

    NOTE(review): the obfuscated original destroyed member/local names and had
    duplicate parameter names (a SyntaxError); names are restored here from the
    visible keyword uses (``self.bert``, ``self.classifiers``, ``total_loss``,
    ...) and the import list — confirm against the upstream PABEE project.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per hidden layer so every layer can exit early.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss?, last_logits)``.

        ``logits`` is the per-layer list produced by the PABEE backbone; with
        labels, each layer's loss is weighted by its depth (deeper = larger).
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Deeper layers receive proportionally larger weight (ix + 1).
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 679 | 0 |
from math import factorial
def _A ( lowerCAmelCase_ : int = 20 ):
"""simple docstring"""
lowerCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCAmelCase__ = n // 2
return int(factorial(lowerCAmelCase_ ) / (factorial(lowerCAmelCase_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 61 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def lowercase(__magic_name__):
    """Exact Gaussian Error Linear Unit: ``x * Phi(x)`` via ``erf``.

    NOTE(review): the original bound both locals to a placeholder while the
    body read ``x``/``cdf`` (NameError); the bindings are restored here. The
    function's intended public name (presumably ``_gelu``) was also clobbered
    — all activations in this file are defined as ``lowercase``.
    """
    x = tf.convert_to_tensor(__magic_name__)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf
def lowercase(__magic_name__):
    """Smoother GELU approximation (tanh formulation, as used in GPT-2).

    NOTE(review): local bindings (``x``, ``pi``, ``coeff``, ``cdf``) restored —
    the obfuscated original assigned them all to one placeholder name.
    """
    x = tf.convert_to_tensor(__magic_name__)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf
def lowercase(__magic_name__):
    """Mish activation: ``x * tanh(softplus(x))``."""
    x = tf.convert_to_tensor(__magic_name__)

    return x * tf.tanh(tf.math.softplus(x))
def lowercase(__magic_name__):
    """Fast GELU approximation: ``0.5 * x * (1 + tanh(x * c2 * (1 + c1 * x * x)))``.

    NOTE(review): the original clobbered both coefficient locals onto one
    placeholder and referenced a single ``coeffa`` twice; the two distinct
    constants (0.044715 and sqrt(2/pi) ~ 0.7978845608) are restored here.
    """
    x = tf.convert_to_tensor(__magic_name__)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def lowercase(__magic_name__):
    """Quick GELU: ``x * sigmoid(1.702 * x)`` — a cheap sigmoid-based approximation."""
    x = tf.convert_to_tensor(__magic_name__)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)
def lowercase ( __magic_name__ ):
    """Clipped GELU: the (exact) GELU clipped to the range [-10, 10].

    NOTE(review): `_gelu` is not bound under that name anywhere in this
    (obfuscated) file — every activation here is defined as `lowercase` —
    so this call would raise NameError; confirm against the original module.
    """
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
def lowercase(__magic_name__, axis=-1):
    """Gated Linear Unit: split the input in two halves along ``axis`` and gate
    the first half with the sigmoid of the second.

    NOTE(review): the obfuscated original declared two parameters with the
    same name (a SyntaxError) and clobbered the split's tuple target; the
    second parameter is restored as ``axis`` (it is forwarded to
    ``tf.split(..., axis=...)``).
    """
    a, b = tf.split(__magic_name__, 2, axis=axis)

    return a * tf.math.sigmoid(b)
# On TF >= 2.4 Keras ships a native GELU; otherwise fall back to the local
# implementations.
# NOTE(review): obfuscation damage throughout this block — every alias below
# is bound to the single placeholder `a` (each assignment clobbering the
# previous), and the mapping's values (`gelu`, `gelu_aa`, `gelu_fast`,
# `gelu_new`, `glu`, `mish`, `quick_gelu`, `approximate_gelu_wrap`, `_gelu`,
# `_gelu_new`) are not defined under those names anywhere in the visible
# file. Confirm the intended bindings against the unobfuscated original.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( __magic_name__ ):
        """Wrapper around the Keras-native GELU.

        NOTE(review): passing the input itself as `approximate=` looks like
        obfuscation damage — upstream passes `approximate=True`; confirm.
        """
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )

    a : Tuple = tf.keras.activations.gelu     # intended: gelu
    a : Dict = approximate_gelu_wrap          # intended: gelu_new
else:
    a : List[str] = _gelu                     # intended: gelu
    a : List[Any] = _gelu_new                 # intended: gelu_new

# Name -> activation callable mapping (intended: ACT2FN).
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
    """Look up a TF activation callable by name in the ACTaFN registry.

    Raises KeyError with the list of known names when the string is unknown.
    Fix: the body read ``activation_string`` while the parameter was named
    ``__magic_name__`` (NameError on every call).
    """
    if __magic_name__ in ACTaFN:
        return ACTaFN[__magic_name__]
    else:
        raise KeyError(F"function {__magic_name__} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public names, consumed by _LazyModule below.
# Fixes: the structure dict and the torch-only list were both bound to the
# same name (clobbering each other), and `_import_structure` — the name
# actually passed to _LazyModule — was never defined.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real (eager) imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Build the lazy proxy once; keep the old `snake_case` binding for
    # backward compatibility and install the proxy as this module so the
    # heavy imports only happen on attribute access.
    snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = snake_case
# ---- dataset-row metadata: | 62 | (non-Python residue, commented out) ----
'''simple docstring'''
from __future__ import annotations


class UpperCamelCase__ :
    """N-th order IIR (infinite impulse response) digital filter.

    Direct-form difference equation:
        a[0]*y[n] = b[0]*x[n] + ... + b[k]*x[n-k] - a[1]*y[n-1] - ... - a[k]*y[n-k]
    Coefficients default to the identity (pass-through) filter.

    Fixes over the previous revision: coefficient and state values were
    assigned to a throwaway local instead of instance attributes, and both
    methods shared the name ``A_`` with duplicate parameter names (a
    SyntaxError), so the class could not function.  The coefficient setter
    is exposed as ``set_coefficients``; ``A_`` (the surviving binding in the
    original, i.e. the per-sample processor) is kept under its old name.
    """

    def __init__( self , snake_case ):
        """Create an identity filter of order ``snake_case``."""
        self.order = snake_case
        # a_{0} ... a_{k}: feedback coefficients; a[0] normalizes the output.
        self.a_coeffs = [1.0] + [0.0] * snake_case
        # b_{0} ... b_{k}: feed-forward coefficients.
        self.b_coeffs = [1.0] + [0.0] * snake_case
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ):
        """Install feedback (a) and feed-forward (b) coefficients.

        A missing leading a[0] is assumed to be 1.0.  Raises ValueError when
        either list does not end up with exactly ``order + 1`` values.
        """
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def A_( self , snake_case ):
        """Process one input sample and return the filtered output sample."""
        result = 0.0
        # Accumulate the history terms first; the b[0]*x[n] term and the
        # a[0] normalization are applied at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * snake_case) / self.a_coeffs[0]
        # Shift both delay lines by one sample.
        self.input_history = [snake_case] + self.input_history[:-1]
        self.output_history = [result] + self.output_history[:-1]
        return result
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger.
# NOTE(review): the logger and the checkpoint->config-URL map below are both
# bound to the same name `a`, so the logger binding is clobbered.  TODO
# restore the distinct upstream names.
a : Union[str, Any] = logging.get_logger(__name__)

# Released checkpoint name -> hosted config-file URL.
a : int = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class a ( lowercase__ ):
    """Configuration for a Speech2Text2 decoder model.

    Defaults reproduce the facebook/s2t-wav2vec2-large-en-de checkpoint.

    Fixes over the previous revision: all 18 parameters shared the name
    ``__lowercase`` (a SyntaxError), the three class attributes overwrote
    one another under the name ``a``, and every value was bound to a
    throwaway local instead of ``self``.  Identifiers are restored from the
    upstream Speech2Text2Config.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Mirrors decoder_layers so generic code reading num_hidden_layers works.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
# ---- dataset-row metadata: | 63 | (non-Python residue, commented out) ----
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
    '''simple docstring'''
    # Rewrites, in `file`, the done_test[_id]-th expected-value statement of
    # test `class_name::test_name` with `correct_line` (preserving indent).
    # NOTE(review): the five parameters all share the name `__magic_name__`
    # (a SyntaxError); upstream names are
    # (file, class_name, test_name, correct_line, done_test), which the body
    # still references, while every `UpperCAmelCase = ...` assignment
    # discards its value into one reused local.  The original identifiers
    # must be restored before this can run; comments describe intended flow.
    # Unique key for this (file, class, test) combination.
    UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(__magic_name__ , "r" ) as f:
        UpperCAmelCase : Tuple = f.readlines()
    # Markers locating the class header, the test def, and the target line
    # (at either 8- or 16-space indentation).
    UpperCAmelCase : Tuple = F"class {class_name}("
    UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
    UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
    UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
    # Scanner state flags: in class / in test func / in target statement /
    # ready to insert; plus occurrence count, captured indent, output lines.
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : List[str] = False
    UpperCAmelCase : Union[str, Any] = False
    UpperCAmelCase : Dict = False
    UpperCAmelCase : Tuple = 0
    UpperCAmelCase : int = 0
    UpperCAmelCase : Tuple = []
    for line in lines:
        if line.startswith(__magic_name__ ):
            UpperCAmelCase : int = True
        elif in_class and line.startswith(__magic_name__ ):
            UpperCAmelCase : Dict = True
        elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
            # Remember the indentation of the statement being replaced.
            UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                UpperCAmelCase : List[str] = True
        if in_class and in_func and in_line:
            # Skip (drop) continuation lines until the statement closes.
            if ")" not in line:
                continue
            else:
                UpperCAmelCase : List[str] = True
        if in_class and in_func and in_line and insert_line:
            # Emit the replacement at the captured indentation.
            new_lines.append(F"{spaces * ' '}{correct_line}" )
            UpperCAmelCase : List[str] = False
        else:
            new_lines.append(__magic_name__ )
    with open(__magic_name__ , "w" ) as f:
        for line in new_lines:
            f.write(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__=None ):
    '''simple docstring'''
    # Driver: parse `correct_filename` (lines of "file;class;test;correct_line")
    # and rewrite each matching test, optionally restricted to the failures
    # listed in `fail_filename`.
    # NOTE(review): both parameters share the name `__magic_name__` (a
    # SyntaxError); upstream names are (correct_filename, fail_filename).
    # The body still reads `fail`, `test_failures`, `correct_lines`, and the
    # per-line unpacked names, none of which the reused `UpperCAmelCase`
    # local ever binds, and `overwrite_file` was renamed to `lowercase`
    # above.  TODO restore the original identifiers.
    if fail is not None:
        with open(__magic_name__ , "r" ) as f:
            UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
    else:
        UpperCAmelCase : Any = None
    with open(__magic_name__ , "r" ) as f:
        UpperCAmelCase : Tuple = f.readlines()
    # Tracks how many times each (file, class, test) has been rewritten.
    UpperCAmelCase : int = defaultdict(__magic_name__ )
    for line in correct_lines:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
    # CLI entry point: --correct_filename holds "file;class;test;correct_line"
    # rows; --fail_filename optionally restricts rewriting to listed failures.
    # Fixes: the parser was bound to a throwaway name (`parser`/`args` were
    # undefined) and the call targeted an undefined `main`; the driver
    # function above is bound as `lowercase` in this file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    lowercase(args.correct_filename, args.fail_filename)
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer

# Module-level logger used by the processor class below.
lowercase_ : int = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """Processor pairing an AutoTokenizer with optional speaker-embedding
    voice presets (Bark-style text-to-speech).

    NOTE(review): this class was mangled by an automated rename — the three
    ``__a`` class attributes overwrite one another (the body later reads
    ``self.preset_shape``, so the dict is the surviving value), several
    signatures repeat the parameter name ``lowerCAmelCase`` (a SyntaxError),
    and the reused local ``SCREAMING_SNAKE_CASE__`` never binds the names
    read afterwards (speaker_embeddings, embeddings_dict, voice_preset, ...).
    The original identifiers must be restored before this can run; the
    comments below describe the intended flow.
    """

    __a = "AutoTokenizer"
    __a = ["tokenizer"]
    # Expected ndarray rank per voice-preset component
    # (checked in the validation method below).
    __a = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__( self , lowerCAmelCase , lowerCAmelCase=None ) -> str:
        # Store the tokenizer via the base processor and keep the optional
        # speaker-embeddings index.
        super().__init__(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= speaker_embeddings

    @classmethod
    def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , **lowerCAmelCase ) -> List[Any]:
        # Alternate constructor (upstream: from_pretrained): fetch the
        # speaker-embeddings JSON index from the repo when requested, then
        # build the tokenizer and assemble the processor.
        if speaker_embeddings_dict_path is not None:
            SCREAMING_SNAKE_CASE__: Dict= get_file_from_repo(
                lowerCAmelCase , lowerCAmelCase , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , )
            if speaker_embeddings_path is None:
                # Missing index file: warn and fall back to no presets.
                logger.warning(
                    f'`{os.path.join(lowerCAmelCase , lowerCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                SCREAMING_SNAKE_CASE__: Dict= None
            else:
                with open(lowerCAmelCase ) as speaker_embeddings_json:
                    SCREAMING_SNAKE_CASE__: List[Any]= json.load(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[Any]= None
        SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoTokenizer.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
        return cls(tokenizer=lowerCAmelCase , speaker_embeddings=lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , lowerCAmelCase="speaker_embeddings" , lowerCAmelCase = False , **lowerCAmelCase , ) -> List[Any]:
        # Upstream: save_pretrained.  Persists each voice preset as .npy
        # files under <save_directory>/<speaker_embeddings_directory>/ and
        # writes a JSON index pointing at them, then saves the tokenizer.
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(lowerCAmelCase , lowerCAmelCase , '''v2''' ) , exist_ok=lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Tuple= {}
            SCREAMING_SNAKE_CASE__: Optional[int]= save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    SCREAMING_SNAKE_CASE__: Tuple= self._load_voice_preset(lowerCAmelCase )
                    SCREAMING_SNAKE_CASE__: Any= {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''] , lowerCAmelCase , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase , )
                        SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(lowerCAmelCase , f'{prompt_key}_{key}.npy' )
                    SCREAMING_SNAKE_CASE__: Dict= tmp_dict
            with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , '''w''' ) as fp:
                json.dump(lowerCAmelCase , lowerCAmelCase )
        super().save_pretrained(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase = None , **lowerCAmelCase ) -> Tuple:
        # Upstream: _load_voice_preset.  Resolves the three prompt arrays of
        # a named preset from the repo and loads them with np.load.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.speaker_embeddings[voice_preset]
        SCREAMING_SNAKE_CASE__: Any= {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            SCREAMING_SNAKE_CASE__: List[Any]= get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            SCREAMING_SNAKE_CASE__: int= np.load(lowerCAmelCase )
        return voice_preset_dict

    def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> Any:
        # Upstream: _validate_voice_preset_dict.  Checks presence, ndarray
        # type, and expected rank (self.preset_shape) of each prompt.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )

    def __call__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="pt" , lowerCAmelCase=256 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , **lowerCAmelCase , ) -> List[Any]:
        # Resolve the requested voice preset (by registry name or .npz path),
        # validate it, tokenize the text, and attach the preset as
        # "history_prompt"-style data on the encoding.
        if voice_preset is not None and not isinstance(lowerCAmelCase , lowerCAmelCase ):
            if (
                isinstance(lowerCAmelCase , lowerCAmelCase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                SCREAMING_SNAKE_CASE__: List[Any]= self._load_voice_preset(lowerCAmelCase )
            else:
                if isinstance(lowerCAmelCase , lowerCAmelCase ) and not voice_preset.endswith('''.npz''' ):
                    SCREAMING_SNAKE_CASE__: Tuple= voice_preset + '''.npz'''
                SCREAMING_SNAKE_CASE__: Union[str, Any]= np.load(lowerCAmelCase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(lowerCAmelCase , **lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= self.tokenizer(
            lowerCAmelCase , return_tensors=lowerCAmelCase , padding='''max_length''' , max_length=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , add_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
        if voice_preset is not None:
            SCREAMING_SNAKE_CASE__: Dict= voice_preset
        return encoded_text
# ---- dataset-row metadata: | 64 | (non-Python residue, commented out) ----
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
    """A binary-tree node holding a coin count and optional children.

    Fix over the previous revision: all three fields were annotated under
    the single name ``SCREAMING_SNAKE_CASE__``, leaving the dataclass with
    one field; the consumer below reads ``node.data`` / ``node.left`` /
    ``node.right``, which fixes the restored field names.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# (moves, excess) pair propagated up the tree by the recursion below.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
# Preserve the previous (auto-generated) module binding as an alias.
a = CoinsDistribResult


def lowercase ( __magic_name__ ):
    """Return the minimum number of moves to leave exactly one coin on every
    node of the binary tree rooted at ``__magic_name__`` (LeetCode 979).

    A move transfers one coin between adjacent nodes.  Raises ValueError
    when the total coin count differs from the node count.  Returns 0 for
    an empty tree.

    Fixes over the previous revision: the result namedtuple was bound to a
    throwaway name and the body mixed the parameter with undefined leftovers
    of a rename (``root``, ``left_distrib_excess``, ...).
    """
    root = __magic_name__
    if root is None:
        return 0

    # Validation: the tree must carry exactly one coin per node in total.
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation: post-order walk accumulating moves; `excess` is the
    # number of coins a subtree passes up to (positive) or requests from
    # (negative) its parent.
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_moves , left_excess = get_distrib(node.left )
        right_moves , right_excess = get_distrib(node.right )
        coins_to_left = 1 - left_excess
        coins_to_right = 1 - right_excess
        total_moves = (
            left_moves
            + right_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(total_moves , excess )

    return get_distrib(root )[0]
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __lowercase ( __lowerCamelCase ):
    """Abstract base for readers that build a Dataset from local/remote files.

    Fixes over the previous revision: all constructor parameters shared the
    name ``A`` (a SyntaxError) and each value was bound to a throwaway
    local instead of ``self``.  Identifiers restored from the upstream
    AbstractDatasetReader; the abstract method keeps this file's name
    (upstream calls it ``read``).
    """

    def __init__(
        self,
        path_or_paths = None,
        split = None,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # A mapping of paths means one dataset per split; otherwise "train".
        # (split test restored from upstream — TODO confirm.)
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __lowercase ( self ):
        # Subclasses materialize and return the dataset here.
        pass
class __lowercase ( __lowerCamelCase ):
    """Abstract base for readers that build a Dataset from an in-memory
    input stream (no path/split arguments).

    Fixes over the previous revision: all constructor parameters shared the
    name ``A`` (a SyntaxError) and each value was bound to a throwaway
    local instead of ``self``.
    """

    def __init__(
        self,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __lowercase ( self ):
        # Subclasses materialize and return the dataset here.
        pass
# ---- dataset-row metadata: | 65 | (non-Python residue, commented out) ----
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level logger.
# NOTE(review): the four constants below are all bound to the same name
# `a`, yet the tokenizer class refers to them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# the original names must be restored for those references to resolve.
a : List[Any] = logging.get_logger(__name__)

# Tokenizer asset filenames (upstream: VOCAB_FILES_NAMES).
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted asset URLs (upstream: PRETRAINED_VOCAB_FILES_MAP).
a : int = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Checkpoint name -> max positional-embedding size
# (upstream: PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
a : Any = {
    "allenai/led-base-16384": 1_63_84,
}
class UpperCamelCase__ ( lowercase__ ):
    """Fast (tokenizers-backed) LED tokenizer.

    NOTE(review): this class was mangled by an automated rename — every
    ``__init__`` parameter is named ``snake_case`` (a SyntaxError), all
    methods are named ``A_`` (so each definition clobbers the previous one,
    and the ``@mask_token.setter`` decorator references a property name that
    is never bound), and the reused local ``UpperCAmelCase`` never binds the
    names read afterwards.  The original identifiers must be restored before
    this can run; comments below describe the intended behavior of each
    upstream method.
    """

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"]

    def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ):
        '''simple docstring'''
        # Forward special tokens and options to the fast-tokenizer base,
        # then re-sync add_prefix_space / trim_offsets into the backend
        # pre-tokenizer and post-processor state.
        super().__init__(
            snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
        UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) )
            UpperCAmelCase : Any = add_prefix_space
            UpperCAmelCase : str = pre_tok_class(**snake_case )
        UpperCAmelCase : int = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase : Dict = "post_processor"
        UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
        if tokenizer_component_instance:
            UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase : int = tuple(state["sep"] )
            if "cls" in state:
                UpperCAmelCase : Union[str, Any] = tuple(state["cls"] )
            UpperCAmelCase : Tuple = False
            if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
                UpperCAmelCase : Optional[Any] = add_prefix_space
                UpperCAmelCase : Optional[int] = True
            if state.get("trim_offsets" , snake_case ) != trim_offsets:
                UpperCAmelCase : Tuple = trim_offsets
                UpperCAmelCase : List[str] = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the changed state.
                UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) )
                UpperCAmelCase : Tuple = component_class(**snake_case )
                setattr(self.backend_tokenizer , snake_case , snake_case )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A_ ( self ):
        '''simple docstring'''
        # Upstream: mask_token getter; warns and returns None when unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def A_ ( self , snake_case ):
        '''simple docstring'''
        # Upstream: mask_token setter; wraps plain strings in an AddedToken
        # that absorbs the preceding space (lstrip).
        UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
        UpperCAmelCase : Optional[Any] = value

    def A_ ( self , *snake_case , **snake_case ):
        '''simple docstring'''
        # Upstream: _batch_encode_plus; pretokenized input requires
        # add_prefix_space=True.
        UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*snake_case , **snake_case )

    def A_ ( self , *snake_case , **snake_case ):
        '''simple docstring'''
        # Upstream: _encode_plus; same pretokenized-input guard as above.
        UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*snake_case , **snake_case )

    def A_ ( self , snake_case , snake_case = None ):
        '''simple docstring'''
        # Upstream: save_vocabulary; delegates to the backend model.
        UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case )
        return tuple(snake_case )

    def A_ ( self , snake_case , snake_case=None ):
        '''simple docstring'''
        # Upstream: build_inputs_with_special_tokens
        # (<s> A </s> [</s> B </s>]).
        UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A_ ( self , snake_case , snake_case = None ):
        '''simple docstring'''
        # Upstream: create_token_type_ids_from_sequences; LED uses all-zero
        # token type ids.
        UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
        UpperCAmelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ):
        '''simple docstring'''
        # Upstream: _pad; additionally pads `global_attention_mask` (with -1,
        # meaning "local attention") to the padded sequence length.
        UpperCAmelCase : int = super()._pad(
            encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCAmelCase : int = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
            if needs_to_be_padded:
                UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCAmelCase : List[str] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCAmelCase : Any = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Expected dataset geometry for the distributed-split check below.
# NOTE(review): both constants are bound to the same name `UpperCamelCase`
# (upstream: NUM_SHARDS = 4, NUM_ITEMS_PER_SHARD = 3), so the first value
# is lost and `main` below reads undefined names.  TODO restore.
UpperCamelCase = 4
UpperCamelCase = 3


class lowerCAmelCase_ ( __snake_case ):
    # Raised when a rank's observed dataset size deviates from the expected
    # split.  NOTE(review): the base class `__snake_case` is not defined in
    # this file — TODO confirm the upstream base.
    pass
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    # Generator of {"i": item_index, "shard": shard_name} example dicts.
    # NOTE(review): the body iterates `shards`, which is not defined — the
    # sole parameter is SCREAMING_SNAKE_CASE — and then calls range() on
    # that same parameter; upstream signature is gen(shards) with
    # range(NUM_ITEMS_PER_SHARD).  TODO restore.
    for shard in shards:
        for i in range(SCREAMING_SNAKE_CASE ):
            yield {"i": i, "shard": shard}
def __magic_name__ ( ) -> List[Any]:
    # End-to-end check that split_dataset_by_node gives each rank the
    # expected number of examples, for both streaming and map-style datasets.
    # NOTE(review): this def silently replaces the shard generator above
    # (same name), and the reused local `_lowercase` never binds the names
    # read later (args, streaming, num_workers, gen_kwargs, ds, full_size,
    # expected_local_size, local_size); `parser`, `SCREAMING_SNAKE_CASE`,
    # `NUM_SHARDS` and `NUM_ITEMS_PER_SHARD` are also unbound.  Upstream is
    # datasets' distributed test script.  TODO restore identifiers.
    _lowercase : Optional[Any] = int(os.environ['RANK'] )
    _lowercase : Optional[Any] = int(os.environ['WORLD_SIZE'] )
    _lowercase : Dict = ArgumentParser()
    parser.add_argument('--streaming' , type=SCREAMING_SNAKE_CASE )
    parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE )
    parser.add_argument('--num_workers' , type=SCREAMING_SNAKE_CASE , default=0 )
    _lowercase : int = parser.parse_args()
    _lowercase : List[Any] = args.streaming
    _lowercase : Any = args.num_workers
    # Build an iterable dataset over named shards; optionally materialize it.
    _lowercase : List[str] = {'shards': [F"""shard_{shard_idx}""" for shard_idx in range(SCREAMING_SNAKE_CASE )]}
    _lowercase : str = IterableDataset.from_generator(SCREAMING_SNAKE_CASE , gen_kwargs=SCREAMING_SNAKE_CASE )
    if not streaming:
        _lowercase : List[str] = Dataset.from_list(list(SCREAMING_SNAKE_CASE ) )
    # Split across ranks and count this rank's share via a DataLoader.
    _lowercase : List[str] = split_dataset_by_node(SCREAMING_SNAKE_CASE , rank=SCREAMING_SNAKE_CASE , world_size=SCREAMING_SNAKE_CASE )
    _lowercase : Optional[int] = torch.utils.data.DataLoader(SCREAMING_SNAKE_CASE , num_workers=SCREAMING_SNAKE_CASE )
    _lowercase : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    _lowercase : Any = full_size // world_size
    # Early ranks absorb the remainder of the uneven split.
    expected_local_size += int(rank < (full_size % world_size) )
    _lowercase : Union[str, Any] = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )


# NOTE(review): `main` is not defined — the entry point above is bound as
# `__magic_name__`.  TODO restore.
if __name__ == "__main__":
    main()
# ---- dataset-row metadata: | 66 | (non-Python residue, commented out) ----
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
    """Create a fresh temporary directory and return a unique file path
    inside it, ending with the given suffix (default: no suffix).

    Fixes over the previous revision: the temp directory was bound to a
    throwaway local while the join used the suffix parameter as the
    directory, the name ``suffix`` itself was undefined, and ``uuid.uuida``
    does not exist (the stdlib function is ``uuid.uuid4``).
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + __magic_name__ )
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for AgentAudio: tensor <-> serialized (wav file) round-trips.

    NOTE(review): throughout this class the reused local `UpperCAmelCase`
    never binds the names read later (`agent_type`, `path`, ...), and
    `snake_case` is referenced but never defined — remnants of an automated
    rename; `torch.floataa` is also not a torch attribute (upstream:
    torch.float32).  TODO restore identifiers.
    """

    def A_ ( self ):
        '''simple docstring'''
        # An AgentAudio built from a raw tensor should serialize to a file
        # whose contents round-trip back to the same samples.
        UpperCAmelCase : Any = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : int = AgentAudio(snake_case )
        UpperCAmelCase : str = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(snake_case ) )
        # Ensure that the file contains the same value as the original tensor
        UpperCAmelCase , UpperCAmelCase : str = sf.read(snake_case )
        self.assertTrue(torch.allclose(snake_case , torch.tensor(snake_case ) , atol=1e-4 ) )

    def A_ ( self ):
        '''simple docstring'''
        # An AgentAudio built from an existing wav file should keep both the
        # samples and the original path.
        UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : Any = get_new_path(suffix=".wav" )
        sf.write(snake_case , snake_case , 1_6_0_0_0 )
        UpperCAmelCase : Optional[Any] = AgentAudio(snake_case )
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , snake_case )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for AgentImage: tensor and PIL-file inputs, serialization paths.

    NOTE(review): same rename corruption as the audio tests above — the
    reused `UpperCAmelCase` local never binds the names read later and
    `snake_case` is undefined.  TODO restore identifiers.
    """

    def A_ ( self ):
        '''simple docstring'''
        # From a raw tensor: to_string() must write a file, to_raw() must be
        # a PIL image, and the file must survive object deletion.
        UpperCAmelCase : Optional[Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        UpperCAmelCase : Tuple = AgentImage(snake_case )
        UpperCAmelCase : Tuple = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )

    def A_ ( self ):
        '''simple docstring'''
        # From an existing image path: the serialized path should be the
        # same file and to_raw() the same image.
        UpperCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Any = Image.open(snake_case )
        UpperCAmelCase : List[str] = AgentImage(snake_case )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )

    def A_ ( self ):
        '''simple docstring'''
        # From a PIL image object: a new serialized path is produced (not
        # the fixture path), while the raw image matches.
        UpperCAmelCase : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Dict = Image.open(snake_case )
        UpperCAmelCase : int = AgentImage(snake_case )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for AgentText: string round-trips through to_string()/to_raw().

    NOTE(review): `snake_case` is referenced but never defined (the string
    literal is bound to the reused `UpperCAmelCase` local) — remnants of an
    automated rename.  TODO restore identifiers.
    """

    def A_ ( self ):
        '''simple docstring'''
        # AgentText should expose the wrapped string unchanged in both
        # serialized and raw forms.
        UpperCAmelCase : Any = "Hey!"
        UpperCAmelCase : Tuple = AgentText(snake_case )
        self.assertEqual(snake_case , agent_type.to_string() )
        self.assertEqual(snake_case , agent_type.to_raw() )
        self.assertEqual(snake_case , snake_case )
# ---- dataset-row metadata: | 679 | 0 | (non-Python residue, commented out) ----
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list ) -> bool:
    """Return True when ``snake_case__`` is an arithmetic series, i.e. all
    consecutive differences are equal.

    Raises ValueError for non-list input or an empty list; a single-element
    list is trivially arithmetic.

    Fixes over the previous revision: the type check called
    ``isinstance(x, x)`` — which raises TypeError for every input — and the
    body referenced undefined names (``series``, ``common_diff``) left over
    from an automated rename.
    """
    if not isinstance(snake_case__ , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(snake_case__ ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(snake_case__ ) == 1:
        return True
    common_diff = snake_case__[1] - snake_case__[0]
    for index in range(len(snake_case__ ) - 1 ):
        if snake_case__[index + 1] - snake_case__[index] != common_diff:
            return False
    return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list ) -> float:
    """Return the arithmetic mean of the values in ``snake_case__``.

    Raises ValueError for non-list input or an empty list.

    Fixes over the previous revision: the type check called
    ``isinstance(x, x)`` (always TypeError) and the accumulator ``answer``
    was never bound (its initializer was assigned to a throwaway local).
    """
    if not isinstance(snake_case__ , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(snake_case__ ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in snake_case__:
        answer += val
    return answer / len(snake_case__ )
# Run the module's doctests when executed directly.
# Fix: stray dataset-row residue ("| 67 |") fused onto the final line made
# this block a SyntaxError.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
    # NOTE(review): this function looks machine-mangled. The three parameters share a
    # single name (a SyntaxError as written; the argparse block at the bottom of the
    # file suggests they were tf_checkpoint_path / config_path / pytorch_dump_path),
    # and every loaded tensor below is assigned to the same throwaway name
    # `UpperCAmelCase`, so nothing is actually written into `model`'s parameters.
    # Restore from the upstream token-dropping conversion script before trusting it.
    """Convert a TensorFlow2 "token dropping" BERT checkpoint into a PyTorch
    BertForMaskedLM and save it via `save_pretrained` (presumably; see NOTE above)."""

    def get_masked_lm_array(__magic_name__ ):
        # Load one variable from the checkpoint's masked-LM scope. Kernels are
        # transposed: TF stores dense weights as (in, out), torch Linear as (out, in).
        UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )

    def get_encoder_array(__magic_name__ ):
        # Same as above but for variables under the encoder scope.
        UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )

    def get_encoder_layer_array(__magic_name__ , __magic_name__ ):
        # Load a per-transformer-layer variable addressed by layer index.
        UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[int] = array.transpose()
        return torch.from_numpy(__magic_name__ )

    def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ):
        # Attention variables are stored with per-head shapes in TF; they are
        # reshaped to the flat torch shape passed in by the caller.
        UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        UpperCAmelCase : int = array.reshape(__magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[Any] = array.transpose()
        return torch.from_numpy(__magic_name__ )

    print(F"Loading model based on config from {config_path}..." )
    UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ )
    UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention: query/key/value projections (weight + bias each).
        UpperCAmelCase : BertSelfAttention = layer.attention.self
        UpperCAmelCase : List[Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape )
        UpperCAmelCase : int = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape )
        # Self-attention Output: projection back to hidden size + its LayerNorm.
        UpperCAmelCase : BertSelfOutput = layer.attention.output
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape )
        UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" )
        # Intermediate (FFN up-projection).
        UpperCAmelCase : BertIntermediate = layer.intermediate
        UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" )
        UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" )
        # Output (FFN down-projection + LayerNorm).
        UpperCAmelCase : BertOutput = layer.output
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" )
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" )
        UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" )
    # Embeddings (position, token-type, embedding LayerNorm).
    UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" )
    UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" )
    UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" )
    UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" )
    # LM Head (prediction transform dense + LayerNorm, tied embedding table).
    UpperCAmelCase : str = model.cls.predictions.transform
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" )
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
    UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" )
    # Pooling: a fresh BertPooler populated from the checkpoint's pooler layer.
    UpperCAmelCase : str = BertPooler(config=__magic_name__ )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" )
    # Export final model
    model.save_pretrained(__magic_name__ )
    # Integration test - should load without any errors ;)
    UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ )
    print(new_model.eval() )
    print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    # The conversion routine above is defined under the (mangled) name `lowercase`;
    # the original call referenced the undefined `convert_checkpoint_to_pytorch`,
    # which raised NameError — call the function by the name it is actually bound to.
    lowercase(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times under preemptive SJF (shortest remaining time first).

    The original def repeated the parameter name `A_` (a SyntaxError) and assigned
    every local to `__UpperCAmelCase`, so references such as `remaining_time` and
    `finar` were unbound; names are restored from the in-file reference sites.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999  # sentinel "infinity" for the running minimum remaining time
    short = 0  # index of the currently shortest ready process
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        # Pick the arrived, unfinished process with the least remaining time.
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            # Nothing has arrived yet; just advance the clock.
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time = turnaround - burst
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process = its burst time + its waiting time.

    (Signature order matches the call site: calculate_turnaroundtime(bt, n, wt).)
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''')
    print("""Average turn around time =""", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    # Interactive driver: read process timings, run SJF, and print a summary table.
    # (Original assigned everything to `__A`, leaving `fcfs` and friends unbound.)
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
# Module constants for the init-consistency checker. The functions below reference
# these exact names (PATH_TO_TRANSFORMERS, _re_backend, ...), so the definitions
# are restored from the mangled `a : ... = ...` form. Patterns are byte-identical;
# only raw-string prefixes were normalized (no escape semantics change).
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init, or None.

    (Restored name: parse_init and the TYPE_CHECKING scanners call `find_backend`.)
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    # Multiple backends on one line are joined as "a_and_b".
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an __init__.py and return two dicts mapping backend name (or "none") to
    the objects declared, respectively, in `_import_structure` and under
    `TYPE_CHECKING`. Returns None for inits that do not use that structure.

    (Definition-site names were mangled; the scanning logic is unchanged and the
    restored locals — `line`, `objects`, `backend`, ... — match the reference
    sites that survived mangling.)
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall("\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init (per backend) and return a list of error strings.

    (Original def duplicated one parameter name — a SyntaxError; the body already
    referenced `import_dict_objects`/`type_hint_objects`, so those were the names.)
    """

    def find_duplicates(seq):
        # Values that appear more than once.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Set comparison: flag objects present on one side only.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree and raise ValueError if any __init__.py
    has mismatched `_import_structure` / TYPE_CHECKING halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for readability.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the dotted names of all transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; deeper ones were added via folders.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main init; check_submodules references
# this list under the name IGNORE_SUBMODULES (the mangled form bound it to `a`).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Raise ValueError if a submodule on disk is missing from the main init's
    `_import_structure`."""
    # Load the in-repo transformers package (not a site-packages install).
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
    # Run both consistency checks when invoked as a script.
    # (Removed trailing dataset-separator residue.)
    check_all_inits()
    check_submodules()
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
'''simple docstring'''
import os
def solution():
    """Project Euler 18: maximum top-to-bottom path sum through the triangle in
    `triangle.txt` (located next to this script), via bottom-up accumulation.

    (The call site uses `solution()`; the mangled def also referenced the
    undefined `__magic_name__` where `__file__` belongs.)
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Accumulate: each cell takes the best of its two parents (0 at the edges).
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_one = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_two = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_one, number_two)
    return max(a[-1])
if __name__ == "__main__":
    # Print the Project Euler answer. (Removed trailing separator residue.)
    print(solution())
# flake8: noqa
# Lint as: python3
# Public API of this utils package: the list matches the names imported below,
# i.e. it is the conventional `__all__` export declaration (the mangled binding
# to `lowerCamelCase` made it inert).
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
'''simple docstring'''
def fibonacci(n):
    """Return the n-th Fibonacci number (F(1)=0 by this module's convention).

    Returns 0 for non-int input. The original checked `isinstance(n, n)`, which
    raises TypeError for every n >= 2; the intended check is against `int`.
    (Name restored: fibonacci_digits_index calls `fibonacci`.)
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with at least n digits.

    (Name and locals restored: `solution` calls `fibonacci_digits_index`, and
    the mangled body already referenced `digits` and `index`.)
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n=1000):
    """Project Euler 25: index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Read the digit count from stdin. (Removed trailing separator residue.)
    print(solution(int(str(input()).strip())))
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Module-level pytest marker: pytest only applies it when bound to the magic
# name `pytestmark` (the mangled `_lowerCamelCase` binding had no effect).
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset should drop the loading script (and no __pycache__) in the target dir."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric should drop the metric script (and no __pycache__) in the target dir."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info should carry the requested config name and its splits."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Asking for info without a config on a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name should be among the dataset's configs."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    """get_dataset_infos should return one entry per config, keyed by config name."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    """A specific config's info should expose its name and splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Split names without a config on a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Model-size -> architecture hyperparameter tables for RWKV checkpoints.
# The conversion routine references these exact names (the mangled form bound
# both dicts to `a`, leaving the references undefined).
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys to the transformers naming scheme, in place.

    (Definition-site names were mangled; `weight`/`state_dict` were still
    referenced by the surviving code, so the locals are restored from those.)
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Everything except the LM head lives under the "rwkv." prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert it to the transformers
    format, shard it, and save everything under `output_dir`.

    The original def repeated one parameter name seven times (a SyntaxError);
    parameter names are restored from the argparse call site below, and locals
    from their surviving reference sites (`tokenizer`, `config`, `shards`, ...).
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point. (Original bound the parser and parsed args to `a`,
    # leaving `parser`/`args` undefined; trailing separator residue removed.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
# Module logger (conventional name; the mangled `_UpperCAmelCase` binding was unused).
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __magic_name__(Pipeline):
    """Visual question answering pipeline: answers open-ended questions about images.

    The four stage methods below must carry the names the `Pipeline` base class
    dispatches to (`_sanitize_parameters` / `preprocess` / `_forward` /
    `postprocess`); in the mangled source all four were named `_A` (with
    duplicate parameter names — a SyntaxError), so the pipeline machinery could
    not find them. The decorator argument and base class are restored from this
    file's own imports.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        # Split pipeline kwargs into preprocess / forward / postprocess params.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        # Accept either (image, question) or a ready-made {"image": ..., "question": ...}
        # dict (or list of such dicts) as the first argument.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            # Multi-label style scoring: independent sigmoid per answer.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        # `idalabel` in the mangled source is the digit-mangled form of `id2label`.
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
def lowercase(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    The result is zero-padded to the width of the wider operand and prefixed
    with "0b", e.g. ``lowercase(25, 32) == "0b000000"``.

    Raises:
        ValueError: if either input is negative.

    NOTE(review): the mangled original declared both parameters as
    ``__magic_name__`` (a SyntaxError); the body clearly intended ``a`` and
    ``b``, restored here.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Pad both operands to equal width, then AND column by column.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests for FlaxAutoModel / AutoConfig / AutoTokenizer.

    NOTE(review): this class was damaged by an automated rename — every method
    is named ``SCREAMING_SNAKE_CASE__`` (so only the last definition survives
    on the class), results are assigned to the bare name
    ``SCREAMING_SNAKE_CASE`` and then never used, and the loop variable the
    bodies clearly intend (``model_name``) is referenced as the unbound name
    ``a``.  The code is preserved byte-for-byte; restore distinct ``test_*``
    method names and real bindings before relying on these tests.
    """

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # Intended: load each BERT checkpoint's config and Flax model and
        # assert they are the expected classes.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(a):
                SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)
                SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        # Intended: same round-trip checks for the RoBERTa checkpoints.
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(a):
                SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)
                SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # Intended: ensure a jitted forward pass works for Flax BERT.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
            SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(a)
            SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**a):
                return model(**a)
            eval(**a).block_until_ready()

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        # Intended: ensure a jitted forward pass works for Flax RoBERTa.
        for model_name in ["roberta-base", "roberta-large"]:
            SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
            SCREAMING_SNAKE_CASE = FlaxRobertaModel.from_pretrained(a)
            SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**a):
                return model(**a)
            eval(**a).block_until_ready()

    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        # Intended: an invalid repo id raises with this message; the first
        # argument to assertRaisesRegex (an exception class, likely
        # EnvironmentError/OSError) was lost in the rename.
        with self.assertRaisesRegex(
            a , 'bert-base is not a local folder and is not a valid model identifier'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('bert-base')

    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        # Intended: an invalid revision raises; the model id argument (likely
        # DUMMY_UNKNOWN_IDENTIFIER, imported above) was lost in the rename.
        with self.assertRaisesRegex(
            a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a , revision='aaaaaa')

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # Intended: a repo without flax weights raises a clear error.
        with self.assertRaisesRegex(
            a , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Intended: loading a PyTorch-only repo instructs the user to pass
        # from_pt=True.
        with self.assertRaisesRegex(a , 'Use `from_pt=True` to load this model'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
| 73 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework string used for `return_tensors` in the tests
# below, preferring torch, then tensorflow, falling back to jax.
# NOTE(review): the test class below reads `FRAMEWORK` (see the
# `if FRAMEWORK != "jax"` branch), but this assigns the name `a` — the
# variable name looks mis-mangled and should be `FRAMEWORK`.
if is_torch_available():
    a : Optional[Any] = "pt"
elif is_tf_available():
    a : List[Any] = "tf"
else:
    a : List[Any] = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Tokenizer tests for PerceiverTokenizer (byte-level vocabulary).

    NOTE(review): heavily damaged by an automated rename — the base class
    ``lowercase__`` is presumably ``TokenizerTesterMixin`` (imported above);
    every method is named ``A_`` (so later definitions shadow earlier ones and
    the mixin hooks like ``setUp``/``get_tokenizer`` are lost); results are
    assigned to the annotated name ``UpperCAmelCase`` and then referenced
    through their original (now unbound) names such as ``tokenizer``,
    ``encoded`` and ``targets``; ``self.perceiver_tokenizer`` is read but the
    cached property that should define it is one of the colliding ``A_``
    methods.  Code is preserved byte-for-byte; only documentation added.
    """
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def A_ ( self ):
        """Intended setUp: save a fresh PerceiverTokenizer into tmpdirname."""
        super().setUp()
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A_ ( self ):
        """Intended `perceiver_tokenizer`: the reference pretrained tokenizer."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def A_ ( self , **snake_case ):
        """Intended `get_tokenizer`: reload the tokenizer saved in setUp."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    # NOTE(review): the next signature declares four parameters all named
    # `snake_case` — a SyntaxError as written; originally
    # (tokenizer, with_prefix_space=False, max_length=20, min_length=5).
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        """Intended `get_input_output_texts` helper: build a decodable
        (text, ids) pair of bounded length from the tokenizer's vocab."""
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids
    def A_ ( self ):
        """Round-trip multibyte text (UTF-8) through encode/decode."""
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def A_ ( self ):
        """Batch-encode with padding and check ids/shapes for each framework."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def A_ ( self ):
        """Encoder-only model: no decoder_* keys should be produced."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )
    def A_ ( self ):
        """Target texts honor max_length with max_length padding."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )
    def A_ ( self ):
        """Save/reload round-trips, including added and special tokens."""
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )
    def A_ ( self ):
        """additional_special_tokens can be edited on disk and overridden
        via from_pretrained(additional_special_tokens=...)."""
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def A_ ( self ):
        """Byte id 178 decodes to the Unicode replacement character."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
    def A_ ( self ):
        """Intentionally skipped for this byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped for this byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped for this byte-level tokenizer."""
        pass
    def A_ ( self ):
        """Intentionally skipped for this byte-level tokenizer."""
        pass
    def A_ ( self ):
        """convert_tokens_to_string handles special tokens mixed with bytes."""
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
def a__(snake_case: int = 1_000_000) -> int:
    """Project Euler 135: count n < `snake_case` for which
    x^2 - y^2 - z^2 = n has exactly ten solutions with x, y, z a decreasing
    arithmetic progression of positive integers.

    Writing y = a with common difference d (x = a + d, z = a - d), the
    equation reduces to n = a * (4d - a), valid when a > d and a < 4d.

    NOTE(review): the mangled original's inner loop was
    ``range(snake_case, snake_case, snake_case)`` — an empty range — so it
    silently returned 0 for every input; the canonical bounds are restored.
    """
    limit = snake_case + 1
    # frequency[n] counts the number of (a, d) representations of n.
    frequency = [0] * limit
    for first_term in range(1, limit):
        # Only multiples n of `first_term` can satisfy n = a * (4d - a).
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d > a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


# The __main__ guard below refers to `solution`, the function's original
# (pre-mangling) name; keep both names bound for compatibility.
solution = a__


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 74 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration module.
# NOTE(review): both of these module constants were mangled to the name `a`,
# so the pretrained-config map below immediately rebinds (clobbers) the
# logger; conventionally these are `logger` and
# `EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
a : Tuple = logging.get_logger(__name__)
# Map of known pretrained checkpoints to their hosted config.json files.
a : str = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """
    Configuration class for an EfficientFormer model.  Stores the
    architecture hyper-parameters listed below and forwards everything else
    to the base config class.

    NOTE(review): restored from a mangled original whose ``__init__``
    declared every parameter as ``snake_case`` (a SyntaxError) and assigned
    values to the bare name ``UpperCAmelCase`` instead of ``self.*``; the
    base class name ``lowercase__`` is replaced by ``PretrainedConfig``
    (imported at the top of this module).  Parameter names follow the
    upstream EfficientFormerConfig signature, whose defaults match the
    original values one-for-one.
    """

    # Model-type identifier used by the auto-config machinery (the attribute
    # name itself had been mangled to SCREAMING_SNAKE_CASE__).
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        # NOTE: the list defaults mirror the upstream signature; they are
        # only read, never mutated.
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        # `num_metaad_blocks` in the mangled source; upstream name is
        # num_meta3d_blocks (the count of 3D meta-blocks).
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def a__(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve the series-AC impedance triangle Z^2 = R^2 + X^2 for the
    missing quantity.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a single-entry dict keyed by its name.

    Raises:
        ValueError: unless exactly one argument is 0.

    NOTE(review): the mangled original declared all three parameters as
    ``lowerCAmelCase__`` (a SyntaxError); the body names the intended
    parameters, restored here.

    >>> a__(3, 4, 0)
    {'impedance': 5.0}
    >>> a__(0, 4, 5)
    {'resistance': 3.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if resistance == 0:
        # R = sqrt(Z^2 - X^2)
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        # X = sqrt(Z^2 - R^2)
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        # Z = sqrt(R^2 + X^2)
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """
    Builds small random configs/inputs for TFResNet and runs shape checks,
    driven by the unittest class below through ``self.parent``.

    NOTE(review): restored from a mangled original whose ``__init__``
    declared every parameter as ``snake_case`` (a SyntaxError), assigned to
    the bare name ``UpperCAmelCase`` instead of ``self.*``, and whose class
    name had been rewritten to ``UpperCamelCase__`` even though the test
    class below instantiates ``TFResNetModelTester`` — that name and the
    method names the test class actually calls are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when use_labels) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """A small ResNetConfig matching this tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model output shape: (B, C_last, H // 32, W // 32)."""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head output shape: (B, num_labels)."""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Split prepare_config_and_inputs() into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """
    Model-level tests for TFResNet (config round-trips, forward signature,
    hidden-state shapes, classification head, pretrained loading).

    NOTE(review): restored from a mangled original that listed the same
    undefined base ``lowercase__`` twice (a TypeError at class creation — the
    imports above provide ``TFModelTesterMixin`` and ``PipelineTesterMixin``),
    collapsed every class attribute to ``SCREAMING_SNAKE_CASE__`` and every
    method to ``A_``, and discarded the ``self.model_tester`` /
    ``self.config_tester`` bindings that the method bodies read.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet is a plain conv net: no pruning, no resizable embeddings,
    # no head masking, no attentions, and ONNX export is not tested here.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common text-config properties do not apply to ResNet.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # Embedding output plus one hidden state per stage.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase():
    """Load the standard COCO fixture image used by the integration test.

    NOTE(review): the mangled original assigned the opened image to the bare
    name ``UpperCAmelCase`` and then returned the unbound name ``image``
    (a NameError); the binding is restored here.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Backward-compatible alias: the integration test below calls this helper by
# its original (pre-mangling) name `prepare_img`.
prepare_img = lowercase
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """
    Integration test: run the pretrained TFResNet checkpoint on a real image
    and compare logits against recorded reference values.

    NOTE(review): restored from a mangled original in which both methods were
    named ``A_`` (so the property shadowed nothing and the test could not
    reach it) and every local was assigned to the bare name ``UpperCAmelCase``.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the reference checkpoint (None without vision)."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        # `lowercase` is the image-loading helper defined just above
        # (originally named `prepare_img`).
        image = lowercase()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 679 | 0 |
"""simple docstring"""
import math
def __UpperCAmelCase ( __UpperCamelCase = 1_00 ):
__lowercase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
__lowercase : Any = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 76 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=5 , snake_case=4 , snake_case=6_4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : List[Any] = scope
def A_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : str = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MPNetModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case , snake_case )
UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = MPNetForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Optional[int] = MPNetForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Optional[int] = MPNetForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = MPNetForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : str = config_and_inputs
UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common + pipeline test-suite bindings for the MPNet model family.

    NOTE(review): the two ``lowercase__`` base classes are undefined in this file —
    presumably the shared ModelTesterMixin / PipelineTesterMixin; confirm against
    the imports at the top of the file. Test method names restored to unittest
    conventions (the obfuscated originals all shared one name and shadowed each
    other, so only the last test survived).
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original reused one obfuscated attribute name for both flags
    # below (False then True); the names follow ModelTesterMixin conventions — confirm.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        """Create the shared model tester and config tester used by the tests below."""
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration check against the pretrained ``microsoft/mpnet-base`` checkpoint."""

    @slow
    def A_ ( self ):
        """Run the base model on a fixed input and compare a 3x3 output slice to reference values."""
        model = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]

        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    """A single linked-list node holding ``data`` and a pointer to the next node."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # next node in the chain, or None at the tail


class LinkedList:
    """Minimal singly linked list supporting push-to-front, printing, and data swap.

    The original obfuscation named both classes ``a__`` (the second shadowed the
    first) while the driver below referenced ``Node``/``LinkedList``/``ll``; names
    are restored to match those references.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print every node's data on one line, separated by spaces."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """Insert a new node holding ``new_data`` at the front of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding each value.

        No-op when the two values are equal or either is absent from the list.
        """
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 77 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map each slow tokenizer class name to its fast counterpart, e.g. "BertTokenizer" -> BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow tokenizer checkpoints and re-save them in the fast (tokenizer.json) format.

    Args:
        tokenizer_name: Name of one tokenizer class in ``TOKENIZER_CLASSES``, or None for all.
        checkpoint_name: Single checkpoint to convert, or None for all canonical checkpoints.
        dump_path: Output directory for the generated fast tokenizer files.
        force_download: Re-download checkpoints even if cached.

    Raises:
        ValueError: If ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # NOTE(review): legacy_format=False assumed (obfuscated in the original) so that
            # a tokenizer.json is emitted — confirm; all non-tokenizer.json files are removed below.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    The function repeatedly divides out each factor in increasing order, so the
    last factor recorded is prime and maximal.

    Args:
        n: The number to factor (default is the Project Euler input).

    Returns:
        The largest prime factor of ``n``.

    Raises:
        TypeError: If ``n`` cannot be cast to int.
        ValueError: If ``n`` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n; smaller factors were already
        # divided out, so this divisor is prime
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # Print the largest prime factor of the default input, using f-string debug (=) syntax.
    print(f"{solution() = }")
| 78 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( PipelineTool ):
    """Agent tool that answers a natural-language question about an image using a ViLT VQA model.

    NOTE(review): the base class is restored to ``PipelineTool`` (imported above);
    the original referenced an undefined name. Attribute/method names follow the
    PipelineTool contract (``default_checkpoint`` / ``encode`` / ``forward`` /
    ``decode``) — the obfuscated originals all shared one name and shadowed each
    other; confirm against the tool base class.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The image processor needs PIL; fail early if the vision extra is missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Tokenize the (image, question) pair into PyTorch model inputs."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without gradient tracking and return the raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit index to its answer label (id2label)."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 679 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase_ ( PipelineTool ):
    """Zero-shot text classification tool built on an NLI model (bart-large-mnli).

    NOTE(review): the base class is restored to ``PipelineTool`` (imported above);
    the original referenced an undefined name, and the duplicated obfuscated
    attribute/method names shadowed each other. Names follow the PipelineTool
    contract (``setup`` is grounded by the original ``super().setup()`` call).
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the model's entailment label index once the model is loaded."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")

    def encode(self, text, labels):
        """Build an NLI batch pairing the text with one hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="""pt""",
            padding="""max_length""",
        )

    def decode(self, outputs):
        """Return the candidate label whose entailment logit is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 79 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite ``layers.0``-style segments as ``layers_0`` so PyTorch keys match Flax naming.

    Name restored from the obfuscated ``lowercase`` — the converter below calls
    ``rename_key`` explicitly.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to Flax convention and reshape the tensor.

    Handles layer-norm scale, embeddings, conv kernels (NCHW -> HWIO transpose),
    linear kernels (transpose), and legacy ``gamma``/``beta`` layer-norm names.

    Args:
        pt_tuple_key: Parameter key split into a tuple of path components.
        pt_tensor: The PyTorch tensor (as a numpy-compatible array).
        random_flax_state_dict: Flattened random Flax params used to disambiguate names.

    Returns:
        ``(renamed_key_tuple, possibly_reshaped_tensor)``.
    """
    # layer norm represented as "scale" in Flax
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Args:
        pt_state_dict: Mapping of PyTorch parameter names to tensors.
        flax_model: A Flax model exposing ``init_weights`` for reference shapes.
        init_key: PRNG seed used to initialise the reference Flax params.

    Returns:
        The unflattened Flax state dict with renamed/reshaped tensors.

    Raises:
        ValueError: If a converted tensor's shape disagrees with the Flax model.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 679 | 0 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}


class NezhaConfig(PretrainedConfig):
    """Configuration for a Nezha model (``model_type = "nezha"``).

    NOTE(review): identifiers reconstructed — the original obfuscation duplicated
    the dict/class names and every ``__init__`` parameter name (a SyntaxError).
    Parameter names are derived from the attribute assignments; defaults are kept
    exactly as in the original signature. Confirm the archive-map attribute name
    against ``PretrainedConfig``.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 80 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    """Tests for ``EulerDiscreteScheduler``.

    The base class is restored to ``SchedulerCommonTest`` (imported above); the
    original referenced an undefined name. Attribute and ``get_scheduler_config``
    names are grounded by the ``self.`` reads below; test method names follow
    unittest conventions (the obfuscated originals all shared one name and
    shadowed each other) — confirm against the scheduler test-suite conventions.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with keyword overrides applied."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full denoising loop and compare sum/mean of the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Full loop under v-prediction parameterisation."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the accelerator device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop with Karras sigmas enabled.

        NOTE(review): ``use_karras_sigmas=True`` restores a value the obfuscation
        dropped — consistent with this test's distinct expected values.
        """
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
from math import pi
def arc_length(angle, radius):
    """Return the length of a circular arc.

    Uses ``arc length = 2 * pi * radius * (angle / 360)`` with ``angle`` in degrees.
    Name and parameters restored from the obfuscated original, whose duplicated
    parameter names were a SyntaxError and whose driver called ``arc_length``.

    Args:
        angle: Central angle of the arc in degrees.
        radius: Radius of the circle.

    Returns:
        The arc length as a float.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 81 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( TestCase ):
    """Repository-quality checks over every dataset script under ``./datasets``.

    The base class is restored to ``TestCase`` (imported above); the original
    referenced an undefined name. Helper names are grounded by the calls in the
    test methods; test method names follow unittest conventions (the obfuscated
    originals shared one name and shadowed each other).
    """

    def _no_encoding_on_file_open(self, filepath):
        """Return a regex match if the file contains an ``open(...)`` call without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath):
        """Return a match for a real ``print(`` call (ignoring ones inside comments/strings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()

        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_all_datasets(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 679 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels for each GLUE fine-tuning task.
GLUE_TASKS_NUM_LABELS = {
    """cola""": 2,
    """mnli""": 3,
    """mrpc""": 2,
    """sst-2""": 2,
    """sts-b""": 1,
    """qqp""": 2,
    """qnli""": 2,
    """rte""": 2,
    """wnli""": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.

    Chooses the head from ``finetuning_task``: a GLUE task builds a sequence
    classifier, a SQuAD task builds a QA head, otherwise a plain LM head.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        xlnet_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: Output directory for weights + config.
        finetuning_task: Optional task name the TF model was fine-tuned on.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--xlnet_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained XLNet model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--finetuning_task""",
        default=None,
        type=str,
        help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """``BertEncoder`` extension that can run a single layer at a time for PABEE early exit.

    The class name is restored from the reference ``BertEncoderWithPabee(...)`` in
    the model ``__init__`` below; the base is ``BertEncoder`` (imported above),
    whose ``self.layer`` module list this method indexes. NOTE(review): the method
    name ``adaptive_forward`` follows the PABEE example convention — confirm
    against callers outside this chunk.
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only layer ``current_layer`` on ``hidden_states`` and return its hidden-state output."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BERT with Patience-based Early Exit (PABEE).

    During training, every layer's prediction head is evaluated. During
    inference, the model stops as soon as `patience` consecutive layers
    agree on the prediction (or, for regression, stay within
    `regression_threshold` of each other).
    """

    def __init__(self, config):
        super().__init__(config)
        # Swap in an encoder that can run one layer at a time.
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        # Early-exit configuration and inference statistics.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the agreement threshold used on regression tasks."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit."""
        self.patience = patience

    def reset_stats(self):
        """Reset the counters used for the average-exit-layer statistic."""
        self.inference_layers_num = 0
        self.inference_instances_num = 0

    def log_stats(self):
        """Print the average number of layers executed per inference instance."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Run BERT layer by layer and return a list of per-layer logits.

        Training mode returns logits from every layer; `patience == 0`
        returns only the last layer's logits; otherwise the loop exits
        once `patience` consecutive layers agree.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect the prediction of every layer's head.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive
            # layers yield the same (or close enough) prediction.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """PABEE BERT with one classification head per encoder layer."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One linear head per layer so every layer can predict for early exit.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return `(loss?, last_layer_logits)`.

        When `labels` are given, per-layer losses are combined with weights
        1..n (deeper layers count more), then normalized.
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 679 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small DistilBert configs/inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a small `DistilBertConfig` plus random input ids / mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: `(config, inputs_dict)`."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # Classes exercised by the common Flax tests. NOTE(review): the original
    # listed FlaxDistilBertForQuestionAnswering twice; the duplicate is removed.
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every model class loads from the hub and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        """Check the base model's output shape and a slice of its values."""
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the PyTorch implementation.
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 83 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU: x * Phi(x), computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the 'gelu_new' variant used by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """GELU with the output clipped to [-10, 10] (quantization-friendly)."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in half along `axis`, gate one half with
    the sigmoid of the other (https://arxiv.org/abs/1612.08083)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Tanh-approximate GELU via the Keras builtin (TF >= 2.4)."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # Older TF: fall back to the manual implementations above.
    gelu = _gelu
    gelu_new = _gelu_new
# Registry mapping activation-name strings to callables.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Look up an activation function by name; raise KeyError if unknown."""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}")
| 679 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D + BatchNorm + activation, with explicit 'same'-style padding."""

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates the channel count and applies the first conv layer."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + BatchNorm used to project/downsample the residual branch."""

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: global pool, bottleneck MLP, channel gating."""

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet 'X' residual block: 1x1 -> grouped 3x3 -> 1x1 convolutions."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet 'Y' residual block: an X block with a Squeeze-and-Excitation stage."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A stage: `depth` stacked X or Y blocks; the first one downsamples."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """The stack of RegNet stages; optionally returns all hidden states."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + global pooling; outputs are converted back to NCHW."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE(review): 224x224 matches the checkpoints' training resolution — confirm against config.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 84 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = order
# a_{0} ... a_{k}
UpperCAmelCase : Optional[int] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCAmelCase : List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCAmelCase : Dict = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCAmelCase : Optional[Any] = [0.0] * self.order
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
if len(snake_case ) < self.order:
UpperCAmelCase : Dict = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
UpperCAmelCase : Optional[Any] = (
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(snake_case )}"
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
UpperCAmelCase : Optional[Any] = (
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(snake_case )}"
)
raise ValueError(snake_case )
UpperCAmelCase : Optional[int] = a_coeffs
UpperCAmelCase : Optional[Any] = b_coeffs
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCAmelCase : List[str] = self.input_history[:-1]
UpperCAmelCase : List[Any] = self.output_history[:-1]
UpperCAmelCase : str = sample
UpperCAmelCase : str = result
return result
| 679 | 0 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (numbers whose only prime factors are 2, 3, 5).

    The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... and is built
    with three merge pointers, one per prime factor.
    """
    ugly_nums = [1]

    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num to avoid duplicates.
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 85 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( correct , fail=None ):
    '''Apply every corrected expected-value line listed in *correct*.

    Args:
        correct: path to a file of ``file;class;test;correct_line`` records,
            one per line.
        fail: optional path to a file listing failing tests
            (``file::class::test`` per line); when given, only those tests
            are overwritten.
    '''
    if fail is not None:
        with open(fail , "r" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , "r" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    # The driver defined above (the second `lowercase`) consumes the parsed paths.
    lowercase(args.correct_filename, args.fail_filename)
| 679 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps each submodule to the public names it defines; consumed by _LazyModule
# so heavy dependencies are only imported on first attribute access.
_import_structure = {
    'configuration_chinese_clip': [
        'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ChineseCLIPConfig',
        'ChineseCLIPOnnxConfig',
        'ChineseCLIPTextConfig',
        'ChineseCLIPVisionConfig',
    ],
    'processing_chinese_clip': ['ChineseCLIPProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras are only exposed when the vision dependencies are installed.
    _import_structure['feature_extraction_chinese_clip'] = [
        'ChineseCLIPFeatureExtractor',
        'ChineseCLIPImageProcessor',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_chinese_clip'] = [
        'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ChineseCLIPModel',
        'ChineseCLIPPreTrainedModel',
        'ChineseCLIPTextModel',
        'ChineseCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    # (The original bound the proxy to a throwaway variable and referenced an
    # undefined `_import_structure`, raising NameError on import.)
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
    """A binary-tree node: an integer coin count plus optional children.

    Attributes:
        data: number of coins stored at this node.
        left: left child, or ``None``.
        right: right child, or ``None``.
    """

    # The original declared all three fields under one collapsed name, so
    # only a single field survived; restore the names the functions below
    # actually access (node.data / node.left / node.right).
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# Result record for get_distrib(): (moves performed, excess coins passed up).
# Bound under its own name because the function below calls it as
# `CoinsDistribResult`; `a` is kept as an alias of the original binding.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
a : Optional[Any] = CoinsDistribResult
def lowercase ( root ):
    '''Return the minimum number of moves to give every node exactly one coin.

    Each node holds ``node.data`` coins; one move transfers a single coin
    between adjacent nodes.  The total coin count must equal the node count.

    Args:
        root: root of the tree (``None`` yields 0).

    Raises:
        ValueError: if the coin count differs from the node count.
    '''
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        # Number of nodes in the subtree rooted at `node`.
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        # Total coins stored in the subtree rooted at `node`.
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        # Returns (moves done inside the subtree, subtree's coin excess),
        # where excess = coins held minus nodes, offset so a leaf with one
        # coin reports excess 1 for an absent child.
        if node is None:
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        # Every coin crossing an edge is one move, in either direction.
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves , distrib_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest
    doctest.testmod()
| 679 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
    """Return *lowercase_* with every ASCII lowercase letter upper-cased.

    Characters outside 'a'..'z' pass through unchanged.

    >>> SCREAMING_SNAKE_CASE("Hello, World!")
    'HELLO, WORLD!'
    """
    # chr(ord(c) - 32) maps 'a'..'z' onto 'A'..'Z' in ASCII.  The original
    # applied ord() to the whole argument and iterated an undefined `word`.
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in lowercase_ )
if __name__ == "__main__":
    # Run the doctest embedded in the function above when executed directly.
    from doctest import testmod
    testmod()
| 87 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# Canonical constant names; the tokenizer class below references these.
# (The original bound every one of them to the same variable `a`, each
# assignment clobbering the previous, leaving the class with NameErrors.)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}

# Backward-compatible alias: the last value the original module bound to `a`.
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
    r"""Fast (Rust-backed) tokenizer for LED, based on BART's byte-level BPE.

    Besides the standard BART behaviour (optional prefix space, offset
    trimming, ``<s>``/``</s>`` special tokens), this class pads LED's
    ``global_attention_mask`` in step with the regular model inputs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its stored `add_prefix_space`
        # differs from the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )

            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self ) -> str:
        """Mask token; logs an error (instead of raising) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self , value ):
        """Set the mask token; plain strings become lstrip'd AddedTokens so
        the mask consumes the space before it."""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """Wrap sequences as ``<s> A </s>`` (or ``<s> A </s> </s> B </s>``)."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """LED, like BART, does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then bring `global_attention_mask` to the same length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )

            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )

        return encoded_inputs
| 679 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowercase__ :
    """A node of a binary tree holding an integer payload."""

    def __init__( self , SCREAMING_SNAKE_CASE) -> None:
        # Store the payload and start with no children.  (The original body
        # bound these values to throwaway locals, leaving the node empty.)
        self.data = SCREAMING_SNAKE_CASE
        self.left = None
        self.right = None
def _snake_case ( ):
    """Interactively build a binary tree in level order and return its root.

    Prompts on stdin for each node's value; entering ``n`` (or nothing)
    stops input and returns the tree built so far.
    """
    print("""\n********Press N to stop entering at any point of time********\n""" )
    check = input("""Enter the value of the root node: """ ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = lowercase__(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = lowercase__(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = lowercase__(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise  # unreachable: the loop always returns once the user stops
def _snake_case ( __snake_case : TreeNode ):
    """Print a pre-order traversal (root, left, right), comma-separated."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return

    def _visit(node ):
        # Local recursion: several helpers in this file share the module-level
        # name `_snake_case`, so recursing via that name is unreliable.
        if not node:
            return
        print(node.data , end=""",""" )
        _visit(node.left )
        _visit(node.right )

    _visit(__snake_case )
def _snake_case ( __snake_case : TreeNode ):
    """Print an in-order traversal (left, root, right), comma-separated."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return

    def _visit(node ):
        # Local recursion avoids depending on this function's module-level name.
        if not node:
            return
        _visit(node.left )
        print(node.data , end=""",""" )
        _visit(node.right )

    _visit(__snake_case )
def _snake_case ( __snake_case : TreeNode ):
    """Print a post-order traversal (left, right, root), comma-separated."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return

    def _visit(node ):
        # Local recursion avoids depending on this function's module-level name.
        if not node:
            return
        _visit(node.left )
        _visit(node.right )
        print(node.data , end=""",""" )

    _visit(__snake_case )
def _snake_case ( __snake_case : TreeNode ):
    """Print a breadth-first (level-order) traversal, comma-separated."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return
    q: queue.Queue = queue.Queue()
    q.put(__snake_case )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def _snake_case ( __snake_case : TreeNode ):
    """Print a level-order traversal with one output line per tree level."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return
    q: queue.Queue = queue.Queue()
    q.put(__snake_case )
    while not q.empty():
        # Drain the current level entirely, collecting the next one.
        next_level = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                next_level.append(node_dequeued.left )
            if node_dequeued.right:
                next_level.append(node_dequeued.right )
        print()
        for node in next_level:
            q.put(node )
def _snake_case ( __snake_case : TreeNode ):
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return
    stack: list[TreeNode] = []
    n = __snake_case
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            # Push the cursor, not the original argument, so ancestors are
            # revisited for their right subtrees.
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def _snake_case ( __snake_case : TreeNode ):
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return
    stack: list[TreeNode] = []
    n = __snake_case
    while n or stack:
        while n:
            # Descend left, deferring each node until its left subtree is done.
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=""",""" )
        n = n.right
def _snake_case ( __snake_case : TreeNode ):
    """Iterative post-order traversal using two stacks."""
    if not isinstance(__snake_case , lowercase__ ) or not __snake_case:
        return
    # Two distinct stacks; the original collapsed both into a single name.
    stacka, stackb = [], []
    n = __snake_case
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data , end=""",""" )
def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): every call below targets names (`prompt`, `build_tree`,
    # `pre_order`, ..., and `node`) that are not bound in this file — the
    # helpers above are all defined as `_snake_case` and the tree is assigned
    # to `UpperCAmelCase` — so running this block raises NameError as written.
    # Confirm the intended names against the upstream module.
    print(prompt("""Binary Tree Traversals"""))
    UpperCAmelCase = build_tree()
    print(prompt("""Pre Order Traversal"""))
    pre_order(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal"""))
    in_order(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal"""))
    post_order(node)
    print(prompt() + """\n""")
    print(prompt("""Level Order Traversal"""))
    level_order(node)
    print(prompt() + """\n""")
    print(prompt("""Actual Level Order Traversal"""))
    level_order_actual(node)
    print("""*""" * 50 + """\n""")
    print(prompt("""Pre Order Traversal - Iteration Version"""))
    pre_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal - Iteration Version"""))
    in_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal - Iteration Version"""))
    post_order_iter(node)
    print(prompt())
| 88 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( suffix="" ):
    '''Return a unique file path (in a fresh temp directory) ending in *suffix*.

    The keyword name `suffix` matches how call sites in this module invoke it.
    '''
    directory = tempfile.mkdtemp()
    # uuid4 gives a random, collision-safe basename (the original called the
    # nonexistent `uuid.uuida`).
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )


# Descriptive alias: call sites in this module use `get_new_path`.
get_new_path = lowercase
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the AgentAudio agent type.

    Methods are named ``test_*`` so unittest discovers them (the original
    defined two methods under the same name, so one silently shadowed the
    other and neither ran).
    """

    def test_from_tensor(self ):
        """A tensor wrapped in AgentAudio must round-trip through its file."""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1e-4 ) )

    def test_from_string(self ):
        """An audio file wrapped in AgentAudio must load back to the samples written."""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16_000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the AgentImage agent type.

    Methods are named ``test_*`` so unittest discovers them (the original
    defined several methods under one shared name, shadowing each other).
    """

    def test_from_tensor(self ):
        """A tensor wrapped in AgentImage must serialize and convert to PIL."""
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def test_from_path(self ):
        """Wrapping an on-disk image by path must keep pointing at that file."""
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def test_from_image(self ):
        """Wrapping a PIL image must serialize to a new file, not the source."""
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the AgentText agent type."""

    def test_from_string(self ):
        """AgentText must round-trip a plain string through to_string()/to_raw()."""
        text = "Hey!"
        agent_type = AgentText(text )
        self.assertEqual(text , agent_type.to_string() )
        self.assertEqual(text , agent_type.to_raw() )
        self.assertEqual(text , agent_type )
| 679 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : List[Any] = logging.getLogger()
def UpperCamelCase_( ) -> Tuple:
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowercase : int = parser.parse_args()
return args.f
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = {}
_lowercase : int = os.path.join(lowerCamelCase_ , 'all_results.json' )
if os.path.exists(lowerCamelCase_ ):
with open(lowerCamelCase_ , 'r' ) as f:
_lowercase : Any = json.load(lowerCamelCase_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def UpperCamelCase_( ) -> Tuple:
_lowercase : Optional[int] = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
SCREAMING_SNAKE_CASE : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowerCamelCase( _a ):
@classmethod
def UpperCamelCase ( cls) -> Tuple:
"""simple docstring"""
_lowercase : str = tempfile.mkdtemp()
_lowercase : Union[str, Any] = os.path.join(cls.tmpdir, 'default_config.yml')
write_basic_config(save_location=cls.configPath)
_lowercase : int = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCamelCase ( cls) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = self.get_auto_remove_tmp_dir()
_lowercase : List[str] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16')
run_command(self._launch_args + testargs)
_lowercase : Union[str, Any] = get_results(lowerCamelCase)
self.assertGreaterEqual(result['eval_accuracy'], 0.7_5)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'glue_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_lowercase : Optional[int] = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
_lowercase : List[str] = get_results(lowerCamelCase)
self.assertLess(result['perplexity'], 1_00)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'clm_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_lowercase : Optional[int] = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : str = get_results(lowerCamelCase)
self.assertLess(result['perplexity'], 42)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'mlm_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : str = 7 if get_gpu_count() > 1 else 2
_lowercase : Any = self.get_auto_remove_tmp_dir()
_lowercase : str = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : Optional[int] = get_results(lowerCamelCase)
self.assertGreaterEqual(result['eval_accuracy'], 0.7_5)
self.assertLess(result['train_loss'], 0.5)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'ner_no_trainer')))
@unittest.skip(reason='Fix me @muellerzr')
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : str = self.get_auto_remove_tmp_dir()
_lowercase : Tuple = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : Optional[Any] = get_results(lowerCamelCase)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'], 28)
self.assertGreaterEqual(result['eval_exact'], 28)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'qa_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.get_auto_remove_tmp_dir()
_lowercase : List[str] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : int = get_results(lowerCamelCase)
self.assertGreaterEqual(result['eval_accuracy'], 0.8)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'swag_no_trainer')))
@slow
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Any = self.get_auto_remove_tmp_dir()
_lowercase : str = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : Optional[int] = get_results(lowerCamelCase)
self.assertGreaterEqual(result['eval_rouge1'], 10)
self.assertGreaterEqual(result['eval_rouge2'], 2)
self.assertGreaterEqual(result['eval_rougeL'], 7)
self.assertGreaterEqual(result['eval_rougeLsum'], 7)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'summarization_no_trainer')))
@slow
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = self.get_auto_remove_tmp_dir()
_lowercase : List[str] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
_lowercase : int = get_results(lowerCamelCase)
self.assertGreaterEqual(result['eval_bleu'], 30)
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'epoch_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, 'translation_no_trainer')))
@slow
def UpperCamelCase(self) -> List[str]:
    """Run the `run_semantic_segmentation_no_trainer` example end to end.

    Streams logs to stdout, launches the example on a tiny test dataset and
    asserts a minimal overall accuracy on the eval split.
    """
    # Bug fix: locals were assigned to throwaway `_lowercase` names but
    # referenced as `tmp_dir` / `result` / `lowerCamelCase` (NameError).
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f'''
        {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
        --dataset_name huggingface/semantic-segmentation-test-sample
        --output_dir {tmp_dir}
        --max_train_steps=10
        --num_warmup_steps=2
        --learning_rate=2e-4
        --per_device_train_batch_size=2
        --per_device_eval_batch_size=1
        --checkpointing_steps epoch
    '''.split()
    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    self.assertGreaterEqual(result['eval_overall_accuracy'], 0.10)
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def UpperCamelCase(self) -> Dict:
    """Run the `run_image_classification_no_trainer` example end to end.

    Launches the example on a tiny cats-vs-dogs sample (optionally with fp16
    when CUDA+apex are available), then asserts eval accuracy and that the
    step checkpoint and tracking folder were written.
    """
    # Bug fix: locals were assigned to throwaway `_lowercase` names but
    # referenced as `tmp_dir` / `result` / `lowerCamelCase` (NameError).
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f'''
        {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
        --model_name_or_path google/vit-base-patch16-224-in21k
        --dataset_name hf-internal-testing/cats_vs_dogs_sample
        --learning_rate 1e-4
        --per_device_train_batch_size 2
        --per_device_eval_batch_size 1
        --max_train_steps 2
        --train_val_split 0.1
        --seed 42
        --output_dir {tmp_dir}
        --with_tracking
        --checkpointing_steps 1
    '''.split()
    if is_cuda_and_apex_available():
        testargs.append('--fp16')
    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    # The base model scores a 25%
    self.assertGreaterEqual(result['eval_accuracy'], 0.6)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'step_1')))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'image_classification_no_trainer')))
| 89 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF2 "token dropping" BERT checkpoint into a PyTorch BertForMaskedLM.

    Bug fixes vs. the mangled original: the three parameters all shared the
    name `__magic_name__` (a SyntaxError), every converted tensor was bound to
    a throwaway `UpperCAmelCase` local instead of the model weight it was meant
    to populate, and the function name did not match the call in the
    `__main__` guard (`convert_checkpoint_to_pytorch`).

    Args:
        tf_checkpoint_path: path to the TF2 object-based checkpoint.
        config_path: path to the BERT config JSON describing the architecture.
        pytorch_dump_path: directory where the converted model is saved.
    """

    def get_masked_lm_array(name):
        # MLM-head variables live under the `masked_lm` object in the checkpoint.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            # TF stores dense kernels as (in, out); PyTorch expects (out, in).
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, orginal_shape):
        # Attention kernels are stored per-head; reshape back to the 2D shape
        # the PyTorch module expects before the (in, out) -> (out, in) transpose.
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    # Word embeddings are tied to the MLM embedding table.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # CLI entry point: convert a TF2 Token Dropping BERT checkpoint to PyTorch.
    # Bug fix: the parser was bound to a throwaway name `a` but used as
    # `parser`, and `args` was never defined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file as bytes and return its contents as a string of '0'/'1' bits.

    Bug fixes vs. the mangled original: every local was bound to the throwaway
    name `lowerCAmelCase__` while the body referenced `data`, `result` and
    `curr_byte` (NameError); the function is also renamed to match its call
    site in `compress`.

    Exits the process with an error message if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # Each byte becomes its zero-padded 8-bit binary representation.
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZW bit string back into the original bit string.

    Bug fixes vs. the mangled original: lexicon updates and the rebuilt
    lexicon were assigned to a throwaway name, the doubling check called the
    non-existent `math.loga`, and the function is renamed to match its call
    site in `compress`.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        # The matched code's "0"-extension reuses its slot in the lexicon.
        lexicon[curr_string] = last_match_id + "0"

        # When the code count reaches a power of two, every code grows by one
        # bit: prefix all existing keys with "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        # Register the "1"-extension under the next free binary code.
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a '0'/'1' bit string into bytes and write them to `file_path`.

    Bug fixes vs. the mangled original: the two parameters shared the name
    `A` (a SyntaxError) and the body referenced the unbound names `to_write`
    and `result_byte_array`; renamed to match its call site in `compress`.

    The final chunk is padded with a "1" marker followed by zeros (or a full
    "10000000" byte when the data is byte-aligned); only the chunks before the
    last entry are written, mirroring the original algorithm.

    Exits the process with an error message if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size-prefix header from the decoded bit string.

    Counts the leading zeros up to the first "1", then drops `counter` leading
    characters and a further `counter + 1` characters (the original
    algorithm's two-step slice). Bug fix vs. the mangled original: the slices
    were assigned to a throwaway name, so the unmodified input was returned.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZW file at `source_path` into `destination_path`.

    Bug fixes vs. the mangled original: the two parameters shared the name
    `A` (a SyntaxError) and intermediate results were assigned to a throwaway
    name; renamed to match the call in the `__main__` guard. (The name
    `compress` is kept from the original script even though the pipeline
    performs decompression.)
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
# CLI entry point: decompress argv[1] into argv[2].
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def find_backend(line):
    """Return the backend name(s) mentioned in an `if not is_xxx_available()` line.

    Multiple backends are joined with "_and_" after sorting; returns None when
    the line is not a backend guard. Bug fix vs. the mangled original: the
    function was named `lowercase` while every call site uses `find_backend`,
    and the collected backends were bound to a throwaway name.
    """
    if _re_test_backend.search(line) is None:
        return None
    # `_re_backend` has an (empty) second group, so findall yields tuples.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse an `__init__.py`, returning (import_dict_objects, type_hint_objects).

    Each return value maps a backend name (or "none") to the list of object
    names declared, taken respectively from the `_import_structure` dict and
    from the `TYPE_CHECKING` section. Returns None for traditional inits that
    define no `_import_structure`.

    Bug fix vs. the mangled original: the parse state (`lines`, `line`,
    `objects`, `backend`, the two result dicts, ...) was repeatedly assigned
    to throwaway `UpperCAmelCase` names and then referenced by the real
    names, making the function raise NameError; it was also named `lowercase`
    while the call site uses `parse_init`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the `_import_structure` and TYPE_CHECKING halves of an init.

    Returns a list of human-readable error strings: mismatched backends,
    duplicated object names on either side, and objects present on only one
    side. An empty list means the two halves agree.

    Bug fix vs. the mangled original: the duplicate lists and error name were
    assigned to throwaway `UpperCAmelCase` names (NameError), and the function
    was named `lowercase` while its call site uses `analyze_results`.
    """

    def find_duplicates(seq):
        # Names appearing more than once on one side of the init.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree and validate every `__init__.py`.

    Raises ValueError describing every init whose `_import_structure` and
    TYPE_CHECKING halves disagree. Bug fix vs. the mangled original: the walk
    root, file name, parse result and error list were bound to throwaway
    names and the function was named `lowercase` while the `__main__` guard
    calls `check_all_inits`.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file path.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodule names found on disk.

    Bug fix vs. the mangled original: the walk root was the undefined
    `__magic_name__` (should be `PATH_TO_TRANSFORMERS`), locals were bound to
    throwaway names, and the function was named `lowercase` while its call
    site uses `get_transformers_submodules`.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only top-level modules count as submodules here.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main transformers
# init. Bug fix vs. the mangled original: the constant was bound to `a` (with
# a wrong `str` annotation on a list) while `check_submodules` references it
# as `IGNORE_SUBMODULES`.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Verify every on-disk submodule is registered in the main transformers init.

    Loads the repo's `transformers/__init__.py` directly (so the checked
    module is the one in the repo, not an installed copy) and raises
    ValueError listing any submodule missing from `_import_structure`.

    Bug fix vs. the mangled original: the spec, loaded module and result list
    were bound to throwaway names and the function was named `lowercase`
    while the `__main__` guard calls `check_submodules`.
    """
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
# Run both repo-consistency checks; each raises ValueError on failure.
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 679 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
    """Integration tests for BarthezTokenizer / BarthezTokenizerFast (moussaKam/mbarthez).

    NOTE(review): the source is name-mangled — `A`, `A_`, `_lowerCamelCase`
    are placeholder names; several lines reference names (`tokenizer`, `A_`)
    that are never bound under the mangled names. Verify against the original
    test file before relying on behavior described here.
    """

    # NOTE(review): presumably tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer and test_sentencepiece flags from the mixin
    # contract — all four are bound to the same mangled attribute name.
    _lowerCamelCase: Dict = BarthezTokenizer
    _lowerCamelCase: Union[str, Any] = BarthezTokenizerFast
    _lowerCamelCase: Tuple = True
    _lowerCamelCase: List[Any] = True

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
        # setUp: download the fast tokenizer and save it into the mixin's
        # tmpdirname (both formats) so other tests can reload it.
        super().setUp()
        A = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=A_ )
        A = tokenizer

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        # Token <-> id round trip for the '<pad>' token (id 1).
        A = '<pad>'
        A = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        # Vocabulary ordering and total size checks.
        A = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<s>' )
        self.assertEqual(vocab_keys[1] ,'<pad>' )
        self.assertEqual(vocab_keys[-1] ,'<mask>' )
        self.assertEqual(len(A_ ) ,10_1122 )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
        # vocab_size property matches the expected mbarthez vocabulary size.
        self.assertEqual(self.get_tokenizer().vocab_size ,10_1122 )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
        # Batch-encode two sentences to PyTorch tensors and compare the first
        # row of input_ids against a known-good encoding.
        A = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        A = [0, 57, 3018, 7_0307, 91, 2]
        A = self.tokenizer(
            A_ ,max_length=len(A_ ) ,padding=A_ ,truncation=A_ ,return_tensors='pt' )
        self.assertIsInstance(A_ ,A_ )
        self.assertEqual((2, 6) ,batch.input_ids.shape )
        self.assertEqual((2, 6) ,batch.attention_mask.shape )
        A = batch.input_ids.tolist()[0]
        self.assertListEqual(A_ ,A_ )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        # The slow and rust tokenizers must agree on tokenize() and encode(),
        # both with and without special tokens.
        if not self.test_rust_tokenizer:
            return
        A = self.get_tokenizer()
        A = self.get_rust_tokenizer()
        A = 'I was born in 92000, and this is falsé.'
        A = tokenizer.tokenize(A_ )
        A = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ ,A_ )
        A = tokenizer.encode(A_ ,add_special_tokens=A_ )
        A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
        self.assertListEqual(A_ ,A_ )
        A = self.get_rust_tokenizer()
        A = tokenizer.encode(A_ )
        A = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ ,A_ )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
        # Full integration test against a pinned revision of the hub model.
        # fmt: off
        A = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        A = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=A_ ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=A_ ,)
'''simple docstring'''
import os
def solution():
    """Return the maximum top-to-bottom path sum of the triangle in `triangle.txt`.

    Reads `triangle.txt` from the directory containing this script and folds
    each row into the next (classic Project Euler 18/67 dynamic programming).

    Bug fixes vs. the mangled original: `os.path.realpath` was called on the
    undefined `__magic_name__` instead of `__file__`, the parsed rows and the
    two candidate parents were bound to throwaway names, and the function was
    named `lowercase` while the `__main__` guard calls `solution()`.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # Parents are directly above (if in range) and above-left.
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
# Print the maximum path sum for the bundled triangle.txt.
# NOTE(review): `solution` must exist at module level — in this mangled file
# the function above is bound to a different name; verify.
if __name__ == "__main__":
    print(solution())
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Bug fix: the logger, the name mapping and the lazy mapping were all bound to
# the single throwaway name `UpperCamelCase_`, while the code below references
# them as `logger`, `IMAGE_PROCESSOR_MAPPING_NAMES` and
# `IMAGE_PROCESSOR_MAPPING`; the duplicated "mobilevit" entry is also removed.
logger = logging.get_logger(__name__)

# Maps model type -> image processor class name.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

# Lazy mapping from config class to image processor class.
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _lowerCAmelCase ( __magic_name__ : str ) -> List[Any]:
    """Resolve an image-processor class object from its class name, or None.

    NOTE(review): the body references `class_name`, which is never bound —
    the single parameter is the mangled `__magic_name__`; presumably both are
    the class name. Intermediate results are likewise bound to the throwaway
    name `lowercase` and then never used. Verify against the original
    `image_processor_class_from_name` before relying on this.
    """
    # First look the class name up in the static model-type mapping.
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            lowercase : Tuple =model_type_to_module_name(__magic_name__ )
            lowercase : Optional[int] =importlib.import_module(f'''.{module_name}''' , '''transformers.models''' )
            try:
                return getattr(__magic_name__ , __magic_name__ )
            except AttributeError:
                continue
    # Then check classes registered dynamically (stored in _extra_content).
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(__magic_name__ , '''__name__''' , __magic_name__ ) == class_name:
            return extractor
    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    lowercase : int =importlib.import_module('''transformers''' )
    if hasattr(__magic_name__ , __magic_name__ ):
        return getattr(__magic_name__ , __magic_name__ )
    return None
def _lowerCAmelCase ( __magic_name__ : Union[str, os.PathLike] , __magic_name__ : Optional[Union[str, os.PathLike]] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : Optional[Dict[str, str]] = None , __magic_name__ : Optional[Union[bool, str]] = None , __magic_name__ : Optional[str] = None , __magic_name__ : bool = False , **__magic_name__ : Optional[Any] , ) -> str:
    """Fetch and parse a model's image-processor config, returning {} when absent.

    NOTE(review): mangled — every parameter shares the name `__magic_name__`
    (duplicate parameters are a SyntaxError in Python), and the result of
    `get_file_from_repo` is bound to the throwaway name `lowercase` while the
    code below tests `resolved_config_file`. Presumably this is the original
    `get_image_processor_config(pretrained_model_name_or_path, cache_dir=...,
    force_download=..., ...)`; verify before relying on this.
    """
    lowercase : List[str] =get_file_from_repo(
        __magic_name__ , __magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , resume_download=__magic_name__ , proxies=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , local_files_only=__magic_name__ , )
    if resolved_config_file is None:
        # Fall back to the model config when no dedicated processor config exists.
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(__magic_name__ , encoding='''utf-8''' ) as reader:
        return json.load(__magic_name__ )
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any ):
    """Refuse direct instantiation; use `AutoImageProcessor.from_pretrained` instead."""
    # This class is a pure factory: constructing it directly is always an error.
    error_message = (
        'AutoImageProcessor is designed to be instantiated '
        'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.'
    )
    raise EnvironmentError(error_message)
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase__ )
def lowerCamelCase_ ( cls : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =kwargs.pop('''config''' , UpperCAmelCase__ )
lowercase : List[str] =kwargs.pop('''trust_remote_code''' , UpperCAmelCase__ )
lowercase : Optional[Any] =True
lowercase , lowercase : Tuple =ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =config_dict.get('''image_processor_type''' , UpperCAmelCase__ )
lowercase : Optional[Any] =None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
lowercase : Optional[Any] =config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase : Union[str, Any] =config_dict.pop('''feature_extractor_type''' , UpperCAmelCase__ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
lowercase : Tuple =feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowercase : Tuple =config_dict['''auto_map''']['''AutoFeatureExtractor''']
lowercase : List[str] =feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Tuple =AutoConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# It could be in `config.image_processor_type``
lowercase : Any =getattr(UpperCAmelCase__ , '''image_processor_type''' , UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
lowercase : Optional[Any] =config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
lowercase : Any =image_processor_class_from_name(UpperCAmelCase__ )
lowercase : str =image_processor_auto_map is not None
lowercase : Dict =image_processor_class is not None or type(UpperCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING
lowercase : Tuple =resolve_trust_remote_code(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if has_remote_code and trust_remote_code:
lowercase : Optional[int] =get_class_from_dynamic_module(
UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Union[str, Any] =kwargs.pop('''code_revision''' , UpperCAmelCase__ )
if os.path.isdir(UpperCAmelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING:
lowercase : str =IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase__ )]
return image_processor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase_ ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase__ , UpperCAmelCase__ )
| 92 |
'''simple docstring'''
def lowercase ( __magic_name__ ):
    """Return the Fibonacci number at position n (F(0)=0, F(1)=1 indexing).

    Non-integer inputs (and n == 1, a quirk kept from the original) yield 0.

    Fixes: the machine-renamed body read undefined names `n` and `sequence`
    (the assignments went to throwaway names), and the type guard was the
    nonsensical `isinstance(n, n)` instead of `isinstance(n, int)`.
    """
    n = __magic_name__
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively up to index n.
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def lowercase ( __magic_name__ ):
    """Return the index of the first Fibonacci number with at least n digits.

    Indexing follows F(0)=0, F(1)=1; the scan starts at index 3, matching the
    original loop (which incremented before measuring), so the minimum return
    value for n >= 1 is 3, and n <= 0 returns 2.

    Fixes: the original called an undefined name `fibonacci` (all defs in this
    machine-renamed file are called `lowercase`) and recomputed the whole
    sequence from scratch on every iteration; the fib value is now advanced
    incrementally in O(1) extra work per step.
    """
    n = __magic_name__
    digits = 0
    index = 2
    # fib_curr = F(index) once the loop advances; seeded with F(2)=1, F(3)=2.
    fib_curr, fib_next = 1, 2
    while digits < n:
        index += 1
        fib_curr, fib_next = fib_next, fib_curr + fib_next
        digits = len(str(fib_curr ) )
    return index
def lowercase ( __magic_name__ = 1000 ):
    """Project Euler 25: index of the first Fibonacci number with n digits.

    Fixes: the original delegated to `fibonacci_digits_index`, a name that is
    never defined in this machine-renamed module (every def is `lowercase`),
    so the computation is inlined here to keep the block self-contained.
    """
    n = __magic_name__
    digits = 0
    index = 2
    # fib_curr = F(index) once the loop advances; seeded with F(2)=1, F(3)=2.
    fib_curr, fib_next = 1, 2
    while digits < n:
        index += 1
        fib_curr, fib_next = fib_next, fib_curr + fib_next
        digits = len(str(fib_curr ) )
    return index
if __name__ == "__main__":
    # Fix: `solution` is never defined in this machine-renamed module; the
    # final `lowercase` definition above is the Project Euler 25 solver.
    print(lowercase(int(str(input()).strip())))
| 679 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
__A = logging.get_logger(__name__)
# Map of pretrained checkpoint id -> hosted config URL.
# NOTE(review): this second assignment reuses the name `__A` and shadows the
# logger above — an artifact of the mechanical renaming in this file (the
# originals were presumably `logger` and the Swin config archive map; verify).
__A = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( a , a ):
    """Configuration for the Swin Transformer backbone.

    Stores image/patch geometry, per-stage depths and attention-head counts,
    the usual transformer hyper-parameters, and the backbone stage naming
    used by downstream feature extraction.

    NOTE(review): mechanically renamed file — both base classes are written
    as `a` (a duplicate-base TypeError as written; the imports above suggest
    BackboneConfigMixin and PretrainedConfig), every __init__ parameter is
    `__UpperCAmelCase` (a SyntaxError as written), and the attribute
    assignments read names (`image_size`, `patch_size`, ...) that the
    signature no longer binds.  Verify against the upstream SwinConfig.
    """

    # model_type identifier consumed by AutoConfig.
    __magic_name__ :List[str] = """swin"""
    # Maps standard attribute names onto the names stored on this config.
    __magic_name__ :List[Any] = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=9_6 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 1_2, 2_4] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=3_2 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
        """Populate the config attributes and derive stage names / hidden size."""
        super().__init__(**__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = image_size
        lowerCAmelCase__ :Tuple = patch_size
        lowerCAmelCase__ :Tuple = num_channels
        lowerCAmelCase__ :Tuple = embed_dim
        lowerCAmelCase__ :Union[str, Any] = depths
        lowerCAmelCase__ :Union[str, Any] = len(__UpperCAmelCase )
        lowerCAmelCase__ :int = num_heads
        lowerCAmelCase__ :str = window_size
        lowerCAmelCase__ :Union[str, Any] = mlp_ratio
        lowerCAmelCase__ :Optional[int] = qkv_bias
        lowerCAmelCase__ :List[str] = hidden_dropout_prob
        lowerCAmelCase__ :Optional[Any] = attention_probs_dropout_prob
        lowerCAmelCase__ :List[str] = drop_path_rate
        lowerCAmelCase__ :Any = hidden_act
        lowerCAmelCase__ :List[Any] = use_absolute_embeddings
        lowerCAmelCase__ :Dict = layer_norm_eps
        lowerCAmelCase__ :Optional[Any] = initializer_range
        lowerCAmelCase__ :List[str] = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase__ :Union[str, Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
        lowerCAmelCase__ :Any = ['stem'] + [F"stage{idx}" for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
        lowerCAmelCase__ , lowerCAmelCase__ :str = get_aligned_output_features_output_indices(
            out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
class _lowerCAmelCase ( a ):
    """ONNX export settings for the model configured above.

    Declares the minimum supported library version, the model's graph inputs
    with their dynamic axes, and the numeric tolerance used when validating
    an exported graph.
    """

    # Minimum supported version for the ONNX export path.
    __magic_name__ :str = version.parse("""1.11""" )

    @property
    def snake_case ( self ):
        """Ordered mapping of input name -> dynamic-axis labels."""
        dynamic_axes = {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
        return OrderedDict([('pixel_values', dynamic_axes)] )

    @property
    def snake_case ( self ):
        """Absolute tolerance for validating exported-model outputs."""
        return 1E-4
| 93 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Fix: the conversion function below looks these tables up by their real
# names, but the machine-renamed file bound both dicts only to `a` (the
# second assignment shadowing the first).  Keep `a` for compatibility while
# also binding the identifiers the code actually reads.
a = NUM_HIDDEN_LAYERS_MAPPING

# Hidden (embedding) dimension for each checkpoint size.
HIDEN_SIZE_MAPPING = {
    "169M": 7_68,
    "430M": 10_24,
    "1B5": 20_48,
    "3B": 25_60,
    "7B": 40_96,
    "14B": 51_20,
}
a = HIDEN_SIZE_MAPPING
def lowercase ( __magic_name__ ):
    """Rename the keys of an RWKV checkpoint state dict to HF naming.

    The input dict is mutated in place (every key is popped and re-inserted
    under its new name) and returned for convenience.

    Fixes: the machine-renamed body assigned to throwaway names
    (`UpperCAmelCase`) while reading the originals (`state_dict`,
    `state_dict_keys`, `weight`), which raised NameError on first use.
    Consistent local names are restored; the rename rules are unchanged.
    """
    state_dict = __magic_name__
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=None ):
    """Download an RWKV checkpoint, convert it to HF format, shard and save it.

    Steps: (1) build/save a tokenizer, (2) build/save an RwkvConfig (size
    inferred from the checkpoint name if not given), (3) download and convert
    the state dict, (4) save it in shards plus index, (5) re-save shards to
    reclaim space, and optionally push model + tokenizer to the Hub.

    NOTE(review): mechanically renamed file — all positional parameters are
    spelled `__magic_name__` (a SyntaxError as written; the argparse block
    below shows the real names: repo_id, checkpoint_file, output_dir, size,
    tokenizer_file, push_to_hub, model_name), assignments go to
    `UpperCAmelCase` while later reads use the original locals (`tokenizer`,
    `config`, `state_dict`, `shards`, ...), and `convert_state_dict` is not
    defined under that name in this module.  Verify against the upstream
    RWKV conversion script before running.
    """
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        UpperCAmelCase : List[str] = 5_0277
        UpperCAmelCase : str = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        UpperCAmelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=__magic_name__ )
        UpperCAmelCase : List[Any] = len(__magic_name__ )
    tokenizer.save_pretrained(__magic_name__ )
    # 2. Build the config
    UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                UpperCAmelCase : Union[str, Any] = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
    UpperCAmelCase : str = RwkvConfig(
        vocab_size=__magic_name__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(__magic_name__ )
    # 3. Download model file then convert state_dict
    UpperCAmelCase : Union[str, Any] = hf_hub_download(__magic_name__ , __magic_name__ )
    UpperCAmelCase : Optional[Any] = torch.load(__magic_name__ , map_location="cpu" )
    UpperCAmelCase : Union[str, Any] = convert_state_dict(__magic_name__ )
    # 4. Split in shards and save
    UpperCAmelCase , UpperCAmelCase : Any = shard_checkpoint(__magic_name__ )
    for shard_file, shard in shards.items():
        torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
    if index is not None:
        UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
        # Save the index as well
        with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
            UpperCAmelCase : List[Any] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n"
            f.write(__magic_name__ )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
    UpperCAmelCase : Any = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        # Reload each shard and re-save a detached CPU copy to shrink the file.
        UpperCAmelCase : Dict = torch.load(os.path.join(__magic_name__ , __magic_name__ ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__magic_name__ , __magic_name__ ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(__magic_name__ )
        model.push_to_hub(__magic_name__ , max_shard_size="2GB" )
        tokenizer.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    # Fix: the machine-renamed block bound the parser/args only to `a` while
    # reading the undefined names `parser` and `args`, and called
    # `convert_rmkv_checkpoint_to_hf_format`, which is never defined here —
    # the converter defined above is `lowercase`.  Real names are restored
    # and `a` is kept pointing at the parsed arguments, as before.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    args = parser.parse_args()
    a = args  # preserve the historical module-level binding
    lowercase(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowercase_ ( __A : Tuple=3_2 , __A : Optional[Any]=1_0 , __A : Optional[int]=1_0_0 , __A : str=1_0_2_6 , __A : List[str]=True , __A : Dict="data/tokenized_stories_train_wikitext103.jbl" , __A : Tuple="igf_context_pairs.jbl" , ) -> Tuple:
    """Collect (context, information-gain) pairs for training the IGF secondary learner.

    Builds the train/objective datasets, loads GPT-2, measures perplexity on
    the objective set, then dumps context/IG pairs to `igf_data_file`.

    NOTE(review): mechanically renamed file — every parameter is spelled
    `__A` (a SyntaxError as written) and assignments go to `lowercase` while
    later statements read the originals (`model`, `train_data`,
    `objective_set`).  Verify against the upstream IGF example script.
    """
    set_seed(3 )
    # generate train_data and objective_set
    lowercase , lowercase : str =generate_datasets(
        __A , __A , number=__A , min_len=1_0_2_6 , trim=__A )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    lowercase : Optional[Any] =torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    lowercase : List[str] =load_gpta('''gpt2''' ).to(__A )
    print('''computing perplexity on objective set''' )
    lowercase : Tuple =compute_perplexity(__A , __A , __A ).item()
    print('''perplexity on objective set:''' , __A )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(__A , __A , __A , __A , __A , __A , __A , __A )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def lowercase_ ( __A : Optional[Any] , __A : int=1_5 , __A : Union[str, Any]=1_2_8 , __A : List[Any]=1_0_0 , __A : Optional[Any]="igf_model.pt" , ) -> Union[str, Any]:
    """Train and return the IGF secondary learner seeded from GPT-2 embeddings.

    NOTE(review): mechanically renamed file — all parameters are spelled
    `__A` (a SyntaxError as written) and assignments go to `lowercase` while
    the cleanup reads `model`, `secondary_learner_train_data` and the return
    reads `secondary_learner`.  Verify against the upstream IGF script.
    """
    set_seed(4_2 )
    # Load pre-trained model
    lowercase : List[str] =GPTaLMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    lowercase : str =SecondaryLearner(__A )
    # Train secondary learner
    lowercase : Union[str, Any] =train_secondary_learner(
        __A , __A , max_epochs=__A , batch_size=__A , eval_freq=1_0_0 , igf_model_path=__A , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def lowercase_ ( __A : Any , __A : Dict , __A : List[str] , __A : List[str]=3_2 , __A : Any=1_0_0_0 , __A : Union[str, Any]=1_6 , __A : int=1.0 , __A : Optional[Any]=recopy_gpta , __A : Optional[Any]=None , __A : Optional[Any]=1_0 , __A : str="gpt2_finetuned.pt" , ) -> Dict:
    """Fine-tune GPT-2 with Information Gain Filtration (IGF).

    Iterates over the training data, uses the secondary learner (when given)
    to keep only contexts whose predicted information gain passes a decaying
    threshold, backprops on batches of surviving contexts, periodically
    evaluates test perplexity, and saves/returns the fine-tuned model.

    NOTE(review): mechanically renamed file — every parameter is spelled
    `__A` (a SyntaxError as written) and assignment targets (`lowercase`) do
    not match the names read later (`train_sampler`, `num_train_epochs`,
    `model`, `outputs`, `do_backprop`, ...).  Verify against the upstream
    IGF `finetune` before running.
    """
    lowercase : Tuple =torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    lowercase : Tuple =RandomSampler(__A )
    lowercase : List[str] =DataLoader(__A , sampler=__A )
    lowercase : List[str] =max_steps // (len(__A )) + 1
    lowercase : List[Any] =0
    lowercase : Any =torch.zeros((1, context_len) , dtype=torch.long , device=__A )
    lowercase , lowercase , lowercase : Union[str, Any] =recopy_model(__A , __A , __A )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(__A )
        secondary_learner.eval()
    lowercase : int =[]
    lowercase : List[Any] =0
    lowercase : Optional[int] =[]
    lowercase : Any =[]
    # Compute the performance of the transformer model at the beginning
    lowercase : str =compute_perplexity(__A , __A , __A )
    test_perps.append(__A )
    print('''Test perplexity, step''' , __A , ''':''' , __A )
    for epoch in range(int(__A ) ):
        for step, example in enumerate(__A ):
            torch.cuda.empty_cache()
            # Sample a random context window from the example.
            lowercase : int =random.randint(0 , example.size(2 ) - context_len - 1 )
            lowercase : Optional[int] =example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            lowercase : int =model(__A , labels=__A )
            lowercase : List[str] =True
            if secondary_learner is not None:
                # Predicted information gain for this context.
                lowercase : str =secondary_learner.forward(
                    torch.tensor(__A , dtype=torch.long , device=__A ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(__A ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 1_0:
                    lowercase : List[str] =-1
                if predicted_q < threshold:
                    lowercase : Union[str, Any] =False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lowercase : List[Any] =outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                lowercase : Any =0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step() # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    lowercase : List[Any] =compute_perplexity(__A , __A , __A )
                    test_perps.append(__A )
                    print('''Test perplexity, step''' , __A , ''':''' , __A )
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 6_0:
                    break
        if max_steps > 0 and global_step > 6_0:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , __A )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def lowercase_ ( ) -> int:
    """Entry point: wire up the full IGF pipeline.

    Declares the CLI, collects context/IG pairs, trains the secondary
    learner, and fine-tunes GPT-2 with IGF filtering.

    NOTE(review): mechanically renamed file — the calls to
    `generate_n_pairs`, `training_secondary_learner`, `finetune` and
    `GPTaLMHeadModel` refer to functions that are all defined above under the
    shadowing name `lowercase_`, so they are unresolved as written.  The
    parsed arguments are also never used (every call passes hard-coded
    values).  Verify against the upstream IGF example `main`.
    """
    lowercase : List[str] =argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=__A , default=__A , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=__A , default=__A , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=__A , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=__A , default=__A , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=3_2 , type=__A , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=1_0_0 , type=__A , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=1_0_0 , type=__A , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1_0_0_0 , type=__A , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=1_2_8 , type=__A , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=1_6 , type=__A , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=1_0 , type=__A , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=1_0_0 , type=__A , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=1_0_2_6 , type=__A , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=1_5 , type=__A , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=__A , type=__A , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=__A , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=__A , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=__A , type=__A , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=__A , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
    # Load train data for secondary learner
    lowercase : Optional[int] =joblib.load('''data/IGF_values.jbl''' )
    # Train secondary learner
    lowercase : str =training_secondary_learner(
        __A , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path='''igf_model.pt''' , )
    # load pretrained gpt2 model
    lowercase : Any =GPTaLMHeadModel.from_pretrained('''gpt2''' )
    set_seed(4_2 )
    # Generate train and test data to train and evaluate gpt2 model
    lowercase , lowercase : List[Any] =generate_datasets(
        context_len=3_2 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_0_0 , min_len=1_0_2_6 , trim=__A )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        __A , __A , __A , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=__A , secondary_learner=__A , eval_interval=1_0 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
    # Fix: no function named `main` exists in this machine-renamed module;
    # the entry point defined directly above is `lowercase_`.
    lowercase_()
| 94 |
'''simple docstring'''
def lowercase ( a , b ):
    """Return the bitwise AND of two non-negative integers as a binary string.

    The result is prefixed with "0b" and zero-padded to the width of the
    longer operand, e.g. lowercase(25, 32) -> "0b000000".

    Fixes: the machine-renamed signature declared both parameters as
    `__magic_name__` (a SyntaxError) while the body read `a` and `b`, and the
    locals (`a_binary`, `b_binary`, `max_len`) were likewise assigned to
    throwaway names.  Consistent names are restored; logic is unchanged.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 679 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixtures and constants for the Marian tokenizer tests below.
# Fix: the machine-renamed file bound every constant to `lowerCamelCase_`
# (each assignment shadowing the previous), while the test class below reads
# the original identifiers (e.g. f"{ORG_NAME}opus-mt-en-de").  Each value is
# therefore also bound to its apparent original name; `lowerCamelCase_`
# still ends up holding the framework string, exactly as before.
# NOTE(review): only ORG_NAME is visibly referenced later in this file — the
# other aliases follow the upstream test's naming; verify against it.
lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_VOCAB = lowerCamelCase_
lowerCamelCase_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
mock_tokenizer_config = lowerCamelCase_
lowerCamelCase_ = '''>>zh<<'''
zh_code = lowerCamelCase_
lowerCamelCase_ = '''Helsinki-NLP/'''
ORG_NAME = lowerCamelCase_
# Tensor framework used for `return_tensors`: prefer torch, then TF, else JAX.
if is_torch_available():
    lowerCamelCase_ = '''pt'''
elif is_tf_available():
    lowerCamelCase_ = '''tf'''
else:
    lowerCamelCase_ = '''jax'''
FRAMEWORK = lowerCamelCase_
@require_sentencepiece
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = MarianTokenizer
__magic_name__ = False
__magic_name__ = True
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
super().setUp()
UpperCAmelCase_ : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
UpperCAmelCase_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
UpperCAmelCase_ : Optional[Any] = Path(self.tmpdirname )
save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
UpperCAmelCase_ : str = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : Tuple ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
UpperCAmelCase_ : List[str] = "</s>"
UpperCAmelCase_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowerCAmelCase_ ) , 9 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
UpperCAmelCase_ : Optional[int] = en_de_tokenizer(["I am a small frog"] , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(lowerCAmelCase_ , batch.input_ids[0] )
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = [x.name for x in Path(lowerCAmelCase_ ).glob("*" )]
self.assertIn("source.spm" , lowerCAmelCase_ )
MarianTokenizer.from_pretrained(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = tok(
["I am a small frog" * 1_000, "I am a small frog"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = tok(["I am a tiny frog", "I am a small frog"] , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# fmt: off
UpperCAmelCase_ : int = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def _SCREAMING_SNAKE_CASE ( self ):
    """Check that a Marian tokenizer with separate source/target vocabularies
    encodes through the correct vocabulary on each side and round-trips the
    target text on decode.

    Bug fixed: every local in the original was bound to the same mangled name
    and the assertions compared an undefined name (``lowerCAmelCase_``), so no
    assertion checked anything. Distinct locals are restored.
    """
    tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )

    source_text = "Tämä on testi"
    target_text = "This is a test"
    expected_source_ids = [76, 7, 2_047, 2]
    expected_target_ids = [69, 12, 11, 940, 2]

    # Source-side encoding uses the source vocabulary.
    source_ids = tokenizer(source_text ).input_ids
    self.assertListEqual(source_ids , expected_source_ids )

    # Target-side encoding uses the target vocabulary.
    target_ids = tokenizer(text_target=target_text ).input_ids
    self.assertListEqual(target_ids , expected_target_ids )

    # Decoding the target ids must reproduce the original target text.
    decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
    self.assertEqual(decoded , target_text )
| 95 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors=` in the tests below:
# prefer PyTorch, then TensorFlow, falling back to JAX.
# Bug fixed: the original bound the result to a throwaway name `a`, while the
# test code reads `FRAMEWORK` (e.g. `if FRAMEWORK != "jax"`), which was left
# undefined.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Test suite for the byte-level PerceiverTokenizer.

    NOTE(review): identifiers in this block look machine-mangled — every method
    is named ``A_`` (later defs shadow earlier ones), locals are bound to
    ``UpperCAmelCase`` and never read back, and one signature repeats the
    parameter name ``snake_case`` (a SyntaxError). Flagged rather than guessed
    at; code is kept byte-identical.
    """

    # Presumably `tokenizer_class` / `test_rust_tokenizer` from the
    # TokenizerTesterMixin contract — TODO confirm against the mixin.
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def A_ ( self ):
        '''Create a fresh PerceiverTokenizer and save it to the temp dir.'''
        super().setUp()
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def A_ ( self ):
        '''Return the pretrained deepmind/language-perceiver tokenizer (cached).'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )

    def A_ ( self , **snake_case ):
        '''Reload the tokenizer saved in setUp, forwarding extra kwargs.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )

    # NOTE(review): duplicate parameter name `snake_case` below is a SyntaxError
    # in the mangled source; presumably (input, with_prefix_space, max_length,
    # min_length) — TODO confirm.
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        '''Build a (text, ids) pair of decodable, re-encodable tokens for tests.'''
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids

    def A_ ( self ):
        '''Encode/decode Unicode text and verify exact byte-level ids.'''
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )

    def A_ ( self ):
        '''Batch-encode two sentences with padding and check ids, shapes and masks.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )

    def A_ ( self ):
        '''Encoder-only tokenizer must emit input_ids/attention_mask, no decoder keys.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )

    def A_ ( self ):
        '''Target texts padded to max_length must come back with that length.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )

    def A_ ( self ):
        '''Save/reload round-trips, including added tokens and model_max_length.'''
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )

    def A_ ( self ):
        '''additional_special_tokens from the saved JSON files must be honored
        by from_pretrained, and be overridable via its keyword argument.'''
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    def A_ ( self ):
        '''Decoding the out-of-range byte id 178 yields the replacement character.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )

    def A_ ( self ):
        '''Intentionally skipped for a byte-level tokenizer.'''
        pass

    def A_ ( self ):
        '''Intentionally skipped for a byte-level tokenizer.'''
        pass

    def A_ ( self ):
        '''Intentionally skipped for a byte-level tokenizer.'''
        pass

    def A_ ( self ):
        '''Intentionally skipped for a byte-level tokenizer.'''
        pass

    def A_ ( self ):
        '''convert_tokens_to_string must accept special tokens and return a str.'''
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Bug fixed: every constant below was assigned to the same name
# `__lowerCamelCase`, so the decorators further down referenced undefined names
# (`_CHECKPOINT_FOR_DOC`, `_CONFIG_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`, ...).
# Names restored from those references.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution -> BatchNorm -> activation block.

    Bugs fixed: `__init__` bound its sub-layers to a mangled local instead of
    the `self.padding` / `self.convolution` / `self.normalization` /
    `self.activation` attributes that the forward pass reads; duplicated
    parameter names were a SyntaxError; the keras forward method must be named
    `call`; `ZeroPadding2D`/`Conv2D` restored from the mangled `*aD` names.
    Class name restored from the call sites in the sibling layers.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")
        # Identity when no activation name is given.
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates the channel count and applies the initial 3x3 stride-2 conv.

    Bug fixed: `__init__` bound values to a mangled local instead of the
    `self.num_channels` / `self.embedder` attributes read by `call`; the keras
    forward method is restored to the required name `call`.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + BatchNorm projecting the residual branch to `out_channels`
    (and optionally downsampling via `stride`).

    Bug fixed: `__init__` bound the layers to a mangled local instead of the
    `self.convolution` / `self.normalization` attributes read by `call`;
    duplicated parameter names were a SyntaxError.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation attention: global-average-pool, bottleneck to
    `reduced_channels`, expand back with a sigmoid gate, and rescale the input.

    Bugs fixed: `__init__` bound the layers to a mangled local instead of
    `self.pooler` / `self.attention` read by `call`; duplicated parameter names
    were a SyntaxError; the two attention convs had mangled `filters=` values —
    restored to the bottleneck (`reduced_channels`) and expansion
    (`in_channels`) widths.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X residual layer: 1x1 conv, 3x3 grouped conv, 1x1 conv, with a
    (possibly projected) residual connection.

    Bugs fixed: `__init__` bound everything to a mangled local instead of
    `self.shortcut` / `self.layers` / `self.activation` read by `call`;
    duplicated parameter names were a SyntaxError; the mangled locals in `call`
    collapsed the residual and the running hidden state into one name.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        # Project the residual only when the shape actually changes.
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y residual layer: an X layer with a Squeeze-and-Excitation
    block inserted after the grouped conv.

    Bugs fixed: same family as the X layer — mangled local assignments instead
    of `self.shortcut` / `self.layers` / `self.activation`, duplicated
    parameter names (SyntaxError), and a collapsed residual variable in `call`.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """One RegNet stage: `depth` stacked X or Y layers, the first of which may
    downsample via `stride`.

    Bugs fixed: the layer list was bound to a mangled local instead of
    `self.layers` read by `call`; duplicated parameter names were a
    SyntaxError.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=F'layers.{i+1}') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Chain of RegNet stages, optionally collecting intermediate hidden states.

    Bugs fixed: the stage list was bound to a mangled local instead of
    `self.stages` (which `__init__` itself appends to and `call` iterates);
    the in/out-channel pairs were likewise mangled; `call`'s hidden-state
    accumulator names are restored.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'stages.{i+1}'))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; outputs are transposed back to NCHW.

    Bugs fixed: sub-layers were bound to a mangled local instead of
    `self.config` / `self.embedder` / `self.encoder` / `self.pooler` read by
    `call`; the class attribute read by `@keras_serializable` is restored to
    its required name `config_class`; `call`'s locals are restored so the
    `hidden_states` referenced in the return statement is actually defined.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.

    Bugs fixed: the three class attributes were all assigned to the same
    mangled name (each overwriting the last) — restored to the names the
    TFPreTrainedModel machinery reads (`config_class`, `base_model_prefix`,
    `main_input_name`); the input-spec property is restored to
    `input_signature` and `tf.floataa` to `tf.float32`. Base class restored
    from the `TFPreTrainedModel` import at the top of the file.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NCHW pixel values at the canonical 224x224 resolution.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4), dtype=tf.float32)}
# Bug fixed: both docstring constants were assigned to the same name
# `__lowerCamelCase` (the second overwriting the first); restored to the
# conventional names consumed by the docstring decorators below.
REGNET_START_DOCSTRING = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Bare RegNet backbone wrapping TFRegNetMainLayer.

    Bugs fixed: the main layer was bound to a mangled local instead of
    `self.regnet` read by `call`; the keras forward method is restored to
    `call`; the mangled decorator arguments are restored to the docstring
    constants and the imported `TFBaseModelOutputWithPoolingAndNoAttention`.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone plus a Flatten+Dense classification head.

    Bugs fixed: backbone and head were bound to a mangled local instead of the
    `self.num_labels` / `self.regnet` / `self.classifier` attributes read by
    `call`; the forward method is restored to `call`; the mangled decorator
    arguments are restored; `call`'s locals are restored so `output`, `logits`
    and `loss` are defined where used. Second base class restored from the
    `TFSequenceClassificationLoss` import at the top of the file.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 96 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fixed: both module constants were assigned to the same throwaway name
# `a` (the second shadowing the first); restored to the conventional names.
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    r"""Configuration for EfficientFormer models.

    Bugs fixed: every `__init__` parameter was mangled to the same name
    `snake_case` (duplicate parameter names are a SyntaxError); the names are
    restored from the (unmangled) `self.<name> = ...` assignment targets and
    matched to the defaults by position. The base class is restored from the
    `PretrainedConfig` import at the top of the file, and the string class
    attribute to `model_type`, which `PretrainedConfig` reads.

    All keyword arguments default to the EfficientFormer-L1 architecture; the
    list defaults are intentionally literal (one per stage) and are never
    mutated here. Extra `**kwargs` are forwarded to `PretrainedConfig`.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [4_8, 9_6, 2_2_4, 4_4_8],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 4_4_8,
        key_dim: int = 3_2,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 1_6,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 2_2_4,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
__a = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__a = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> 
predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__a = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class intersection/union areas between one prediction and one ground-truth map.

    Args:
        pred_label: predicted segmentation map (array-like of ints, shape (H, W)).
        label: ground-truth segmentation map (array-like of ints, same shape).
        num_labels (int): number of classes.
        ignore_index (int): label value excluded from the statistics.
        label_map (dict, optional): mapping of old label ids to new label ids,
            applied in place to `label` (so `label` must already support boolean
            indexing when a map is given).
        reduce_labels (bool): if True, shift all labels down by one and map the
            former background (0) to 255, per the ADE20k convention.

    Returns:
        Tuple of four `ndarray`s of shape (num_labels,):
        (area_intersect, area_union, area_pred_label, area_label).
    """
    # Remap ground-truth ids before any other processing.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # Turn into NumPy arrays.
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        # Background (0) becomes 255 (ignored), every other class shifts down by 1.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255  # 255 - 1 wrapped back to "ignore"
    # Drop ignored pixels from both maps.
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    # Pixels where prediction and ground truth agree, bucketed by class.
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    # |A ∪ B| = |A| + |B| - |A ∩ B|, per class.
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate `intersect_and_union` statistics over a list of (prediction, ground-truth) pairs.

    Args:
        results: list of predicted segmentation maps.
        gt_seg_maps: list of ground-truth segmentation maps (same length).
        num_labels (int): number of classes.
        ignore_index (int): label value excluded from the statistics.
        label_map (dict, optional): old-id -> new-id remapping for ground truth.
        reduce_labels (bool): see `intersect_and_union`.

    Returns:
        Tuple of four float64 `ndarray`s of shape (num_labels,):
        (total_area_intersect, total_area_union, total_area_pred_label, total_area_label).
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over a dataset.

    Args:
        results: list of predicted segmentation maps.
        gt_seg_maps: list of ground-truth segmentation maps.
        num_labels (int): number of classes.
        ignore_index (int): label value excluded from the statistics.
        nan_to_num (int, optional): if given, NaNs in the metric arrays are
            replaced by this value (classes absent from both maps yield 0/0).
        label_map (dict, optional): old-id -> new-id remapping for ground truth.
        reduce_labels (bool): see `intersect_and_union`.

    Returns:
        dict with keys ``mean_iou``, ``mean_accuracy``, ``overall_accuracy``,
        ``per_category_iou`` and ``per_category_accuracy``.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # Compute metrics.
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    # nanmean skips classes that never occur (NaN from 0/0 above).
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """`datasets.Metric` wrapper around `mean_iou` above.

    NOTE(review): `_DESCRIPTION` is not defined in this chunk -- confirm it exists
    at module level alongside `_KWARGS_DESCRIPTION` and `_CITATION`.
    """

    def _info(self):
        # Metric metadata: both inputs are 2D integer maps (height x width).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        # Delegate the whole computation to the module-level mean_iou helper.
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 97 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds a tiny ResNet config and dummy inputs for the TF model tests below.

    Restored name: the test class instantiates `TFResNetModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes; used by test_hidden_states_output.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random dummy tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for TF ResNet.

    Several common tests are overridden because ResNet uses no input_ids,
    inputs_embeds, attention_mask or seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # NOTE(review): flag names restored from the stock transformers test file --
    # the obfuscated source only shows five booleans set to False.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties do not apply to ResNet.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test fixture used by the integration test.

    Restored name: the integration test below calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against a pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Processor for the first archived checkpoint; None when vision deps are absent.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 679 | 0 |
"""Lazy-import structure for the CLIPSeg model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Bug fix: the structure was built in a throwaway variable while _LazyModule
# received the undefined name `_import_structure` (NameError on import), and the
# modeling list overwrote the dict instead of being added under its module key.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only configuration/processing symbols.
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds a tiny MPNet config and dummy inputs for the model tests below.

    Restored name: the test class instantiates `MPNetModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        ((config), (input_ids), (input_mask), (sequence_labels), (token_labels), (choice_labels)) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for MPNet."""

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names restored from the stock transformers test file --
    # the obfuscated source only shows one False and one True boolean.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against the pretrained microsoft/mpnet-base checkpoint."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
def longest_common_substring(text1, text2):
    """Return the longest contiguous substring common to *text1* and *text2*.

    Uses the classic O(len(text1) * len(text2)) dynamic programming table where
    dp[i][j] is the length of the common suffix of text1[:i] and text2[:j].

    Raises:
        ValueError: if either argument is not a string.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    # Remember where the best match ends in text1 and how long it is.
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


# Backward-compatible alias for the obfuscated original name.
a = longest_common_substring


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 99 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module logger; referenced throughout convert_slow_checkpoint_to_fast.
logger = logging.get_logger(__name__)

# Map each convertible slow tokenizer name to its fast (Rust-backed) class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow tokenizer checkpoints and re-save them as fast `tokenizer.json` files.

    Args:
        tokenizer_name: name of a tokenizer in TOKENIZER_CLASSES, or None for all of them.
        checkpoint_name: a specific checkpoint id, or None for every known checkpoint.
        dump_path: directory where the converted tokenizer files are written.
        force_download: if True, re-download checkpoints even when cached.

    Raises:
        ValueError: when `tokenizer_name` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab lives in a sub-directory: fold the prefix into the path instead.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Backward-compatible alias for the obfuscated original name.
lowercase = convert_slow_checkpoint_to_fast
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for the video-to-video Stable Diffusion pipeline."""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a minimal set of pipeline components with fixed seeds."""
        torch.manual_seed(0)
        # NOTE(review): `UNetaDConditionModel` follows this file's (garbled) import;
        # upstream diffusers calls this class UNet3DConditionModel.
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded dummy call arguments; 3-frame 32x32 input video."""
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
    """Slow integration test for VideoToVideoSDPipeline against a real checkpoint."""

    def lowercase_ ( self ):
        """Denoise a random 10-frame video for 3 steps and pin a reference slice.

        NOTE(review): the original assigned every result to one placeholder and
        read undefined names (`pipe`, `video`, ...); bindings restored from the
        upstream diffusers test. `torch.floataa` is not a torch attribute —
        upstream uses torch.float16, restored here.
        """
        pipe = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) , generator=generator )
        video = video.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='''pt''' ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 100 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( lowercase__ ):
    """Tool answering a natural-language question about an image (visual QA).

    NOTE(review): the original collapsed every class attribute into one
    placeholder name (each assignment overwriting the previous) and declared
    all three methods as `A_` with duplicate parameter names — a SyntaxError.
    Attribute and method names are restored from the upstream
    `ImageQuestionAnsweringTool`, whose base class calls encode/forward/decode.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__( self , *args , **kwargs ):
        """Require the vision backend (PIL) before the base class loads anything."""
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )

    def encode( self , image , question ):
        """Turn the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image , question , return_tensors="pt" )

    def forward( self , inputs ):
        """Run the VQA model without tracking gradients; return raw logits."""
        with torch.no_grad():
            return self.model(**inputs ).logits

    def decode( self , outputs ):
        """Map the highest-scoring logit to its answer string.

        NOTE(review): the original read `config.idalabel`, which is not a
        transformers config attribute; restored to `id2label`.
        """
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 679 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Configuration for the ALIGN text encoder (BERT-style backbone).

    NOTE(review): the original `__init__` declared every parameter with one
    placeholder name (a SyntaxError) and assigned each value to a single
    placeholder local; parameter names and attribute bindings restored from
    the upstream `AlignTextConfig`.
    """

    # identifies this sub-config in serialized checkpoints; the
    # from_pretrained warning below reads it via cls.model_type
    model_type = """align_text_model"""

    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the text sub-config, unwrapping a full ALIGN config if given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Configuration for the ALIGN vision encoder (EfficientNet-style backbone).

    NOTE(review): the original `__init__` declared every parameter with one
    placeholder name (a SyntaxError); names restored from the upstream
    `AlignVisionConfig`. The mutable list defaults mirror the upstream
    signature and are kept for interface compatibility.
    """

    model_type = """align_vision_model"""

    def __init__( self , num_channels = 3 , image_size = 6_0_0 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2_5_6_0 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # each block repeat expands into 4 hidden layers in the EfficientNet encoder
        self.num_hidden_layers = sum(num_block_repeats ) * 4

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the vision sub-config, unwrapping a full ALIGN config if given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Top-level ALIGN configuration combining a text and a vision sub-config.

    NOTE(review): the original assigned every value to one placeholder name
    and declared both class methods as the same identifier; bindings and
    method names restored from the upstream `AlignConfig`. The references to
    `AlignTextConfig` / `AlignVisionConfig` are kept from the original even
    though this file defines those classes under obfuscated names — verify
    against the real module.
    """

    model_type = """align"""
    # presumably the upstream `is_composition` flag — TODO confirm attribute name
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=6_4_0 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """Build an ALIGN config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict( self ):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 101 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
    """Collapse `name.N` segments of a PyTorch key into `name_N` (Flax style).

    e.g. "layers.0.weight" -> "layers_0.weight".

    NOTE(review): the original called re.findall(key, key) and iterated an
    undefined name; restored to search the key with the pattern below.
    """
    pattern = R"\w+[.]\d+"
    pats = re.findall(pattern , __magic_name__ )
    for pat in pats:
        __magic_name__ = __magic_name__.replace(pat , "_".join(pat.split("." ) ) )
    return __magic_name__
def lowercase ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename one PyTorch weight key to its Flax equivalent and reshape the tensor.

    Returns the (possibly renamed) key tuple and the (possibly transposed)
    tensor. `random_flax_state_dict` is the flattened randomly-initialized
    Flax params, used to decide ambiguous targets.

    NOTE(review): the original declared all three parameters with the same
    name (a SyntaxError) and returned the never-assigned
    `renamed_pt_tuple_key`; bindings restored from the upstream converter.
    """
    # norm layers whose Flax side only has `scale`: map a stray `bias` onto it
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose to Flax's (in, out) kernel layout
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    if pt_tuple_key[-1] == "gamma":
        return pt_tuple_key[:-1] + ("weight",), pt_tensor
    # old PyTorch layer norm bias
    if pt_tuple_key[-1] == "beta":
        return pt_tuple_key[:-1] + ("bias",), pt_tensor
    return pt_tuple_key, pt_tensor
def lowercase ( pt_state_dict , flax_model , init_key=42 ):
    """Convert a PyTorch state dict into nested Flax params for `flax_model`.

    NOTE(review): the original declared all parameters with the same name
    (a SyntaxError) and collapsed every local into one placeholder; bindings
    restored from the upstream converter. The calls to `rename_key` and
    `rename_key_and_reshape_tensor` reference names this file defines under
    obfuscated identifiers — verify against the real module.
    """
    # Step 1: convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 679 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__magic_name__ : int = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__magic_name__ : Any = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__magic_name__ : Dict = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__magic_name__ : Tuple = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__magic_name__ : Tuple = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__magic_name__ : Tuple = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Split a CamelCase identifier into its words, keeping acronym runs intact.

    NOTE(review): the original returned the undefined name `matches`; binding restored.
    """
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , SCREAMING_SNAKE_CASE )
    return [m.group(0 ) for m in matches]
def UpperCamelCase ():
    """Build a DataFrame listing, per model type, which frameworks (PT/TF/Flax)
    implement it and which processor class it uses.

    NOTE(review): the original collapsed every local into one placeholder name;
    bindings restored from the upstream utils/update_metadata.py. The call to
    `camel_case_split` references the helper this file defines under an
    obfuscated name — verify against the real module.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Fill the given table (model class -> (pipeline tag, auto class)) from the
    auto-model mappings of all three frameworks and return it.

    NOTE(review): the original collapsed every local into one placeholder name;
    bindings restored from the upstream utils/update_metadata.py.
    """
    table = SCREAMING_SNAKE_CASE
    modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def UpperCamelCase (token , commit_sha ):
    """Regenerate frameworks.json / pipeline_tags.json and push them to the
    huggingface/transformers-metadata dataset.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) and collapsed every local; bindings restored from the
    upstream script (the __main__ guard calls this as (token, commit_sha)).
    The calls to `get_frameworks_table` / `update_pipeline_and_auto_class_table`
    reference helpers this file defines under obfuscated names — verify.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            """model_class""": model_classes,
            """pipeline_tag""": [table[m][0] for m in model_classes],
            """auto_class""": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , """frameworks.json""" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , """pipeline_tags.json""" ) )
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = """Update"""
        upload_folder(
            repo_id="""huggingface/transformers-metadata""" , folder_path=tmp_dir , repo_type="""dataset""" , token=token , commit_message=commit_message , )
def UpperCamelCase ():
    """Raise if any supported pipeline task is missing from PIPELINE_TAGS_AND_AUTO_MODELS.

    NOTE(review): the original collapsed every local into one placeholder name;
    bindings restored from the upstream utils/update_metadata.py.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser and the parsed args to
    # placeholder names while reading `parser`/`args`; bindings restored.
    # The calls to `check_pipeline_tags` / `update_metadata` reference
    # functions this file defines under obfuscated names — verify.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
    parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
    parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 102 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase__ ):
    """Unit tests for EulerDiscreteScheduler.

    NOTE(review): the original declared every method as `A_` (each shadowing
    the previous) and collapsed every local and class attribute into
    placeholder names; names restored from the upstream diffusers test suite.
    """

    # attributes read by the shared SchedulerCommonTest machinery and by the
    # full-loop tests below (self.scheduler_classes / self.num_inference_steps)
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **kwargs ):
        """Return the base scheduler config, overridden by any kwargs."""
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_full_loop_no_noise( self ):
        """Run the full denoising loop on CPU and pin reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def test_full_loop_with_v_prediction( self ):
        """Same loop with v-prediction parameterization; pins its own references."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3

    def test_full_loop_device( self ):
        """Full loop with timesteps placed on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        # init_noise_sigma may live on the device; pull it to CPU for the multiply
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def test_full_loop_device_karras_sigmas( self ):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 679 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
snake_case = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Whisper-style audio feature extractor: raw mono speech in, padded
    log-mel spectrogram ``input_features`` out.

    NOTE(review): this copy of the file looks machine-mangled. Method
    signatures below repeat the parameter name ``__lowerCamelCase`` (a
    SyntaxError in Python) and most locals are rebound to the single name
    ``_snake_case``, so later reads of the intended distinct names
    (``n_fft``, ``log_spec``, ``input_features``, ...) cannot resolve as
    written. Comments describe the apparent upstream intent — confirm
    against the upstream Whisper feature extractor before relying on them.
    """

    # Name of the model input produced by this extractor.
    A__ : Union[str, Any] = ['''input_features''']

    def __init__( self : Dict , __lowerCamelCase : int=8_0 , __lowerCamelCase : List[Any]=1_6_0_0_0 , __lowerCamelCase : List[str]=1_6_0 , __lowerCamelCase : int=3_0 , __lowerCamelCase : Union[str, Any]=4_0_0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Optional[Any]=False , **__lowerCamelCase : Tuple , ):
        """Configure STFT/mel parameters.

        NOTE(review): the duplicated parameters are presumably
        (feature_size, sampling_rate, hop_length, chunk_length, n_fft,
        padding_value, return_attention_mask) — TODO confirm.
        """
        super().__init__(
            feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
        _snake_case = n_fft
        _snake_case = hop_length
        _snake_case = chunk_length
        # Raw samples per fixed-length chunk.
        _snake_case = chunk_length * sampling_rate
        # Spectrogram frames per chunk.
        _snake_case = self.n_samples // hop_length
        _snake_case = sampling_rate
        # Slaney-normalized mel filter bank covering 0-8000 Hz.
        _snake_case = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__lowerCamelCase , norm='''slaney''' , mel_scale='''slaney''' , )

    def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : np.array ):
        """Compute the clipped, rescaled log10 mel spectrogram of one waveform."""
        _snake_case = spectrogram(
            __lowerCamelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        # Drop the final frame so the feature length matches the model.
        _snake_case = log_spec[:, :-1]
        # Clamp the dynamic range to 8 below the max, then map into ~[-1, 1].
        _snake_case = np.maximum(__lowerCamelCase , log_spec.max() - 8.0 )
        _snake_case = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def __UpperCAmelCase ( __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : float = 0.0 ):
        """Zero-mean / unit-variance normalize each vector, ignoring padding."""
        if attention_mask is not None:
            _snake_case = np.array(__lowerCamelCase , np.intaa )
            _snake_case = []
            # Normalize only over the unpadded prefix of each vector.
            for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
                _snake_case = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    # Re-apply the padding value past the true length.
                    _snake_case = padding_value
                normed_input_values.append(__lowerCamelCase )
        else:
            _snake_case = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values

    def __call__( self : Union[str, Any] , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "max_length" , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Dict , ):
        """Featurize raw speech: validate the sampling rate, coerce to a float32
        batch, pad to ``n_samples``, optionally normalize, then extract
        log-mel features and (optionally) convert to tensors."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        _snake_case = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        _snake_case = is_batched_numpy or (
            isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _snake_case = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
            _snake_case = np.asarray(__lowerCamelCase , dtype=np.floataa )
        elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _snake_case = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _snake_case = [np.asarray([raw_speech] ).T]
        _snake_case = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        _snake_case = self.pad(
            __lowerCamelCase , padding=__lowerCamelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            _snake_case = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            _snake_case = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        _snake_case = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        _snake_case = [self._np_extract_fbank_features(__lowerCamelCase ) for waveform in input_features[0]]
        if isinstance(input_features[0] , __lowerCamelCase ):
            _snake_case = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features]
        else:
            _snake_case = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            _snake_case = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            _snake_case = padded_inputs.convert_to_tensors(__lowerCamelCase )
        return padded_inputs

    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Serialize the extractor config, omitting the bulky mel filter bank."""
        _snake_case = copy.deepcopy(self.__dict__ )
        _snake_case = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 103 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( lowercase__ ):
    """Source-hygiene checks over every dataset script under ./datasets.

    NOTE(review): all four methods are named ``A_``, so earlier definitions
    are shadowed, and the calls to ``self._no_encoding_on_file_open`` /
    ``self._no_print_statements`` cannot resolve at runtime as written; the
    intended distinct method names need restoring.
    """

    def A_ ( self , snake_case ):
        """Return a regex match if the file calls open(...) without an encoding."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            # open(...) calls that pass neither an encoding nor a binary mode.
            UpperCAmelCase : Dict = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            UpperCAmelCase : Tuple = input_file.read()
            UpperCAmelCase : List[Any] = regexp.search(snake_case )
        return match

    def A_ ( self , snake_case ):
        """Return a match for a bare print(...), ignoring ones inside comments
        or string literals."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            UpperCAmelCase : List[str] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            UpperCAmelCase : List[Any] = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            UpperCAmelCase : str = regexp.finditer(snake_case )
        UpperCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def A_ ( self ):
        """Fail if any dataset script opens a file without utf-8 encoding."""
        UpperCAmelCase : Dict = Path("./datasets" )
        UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(snake_case ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )

    def A_ ( self ):
        """Fail if any dataset script contains a bare print statement."""
        UpperCAmelCase : Union[str, Any] = Path("./datasets" )
        UpperCAmelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(snake_case ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(UpperCAmelCase_, UpperCAmelCase_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(UpperCAmelCase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 104 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
# Module-level logger for the PABEE modeling code.
a : str = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase__ ):
    """BertEncoder variant that can run a single encoder layer on demand
    (used by PABEE's layer-by-layer early-exit inference).

    NOTE(review): the signature below repeats ``snake_case`` four times (a
    SyntaxError in Python) and both locals are bound to throwaway names, so
    ``layer_outputs``/``hidden_states`` are undefined where read; the intended
    parameters are presumably (hidden_states, current_layer, attention_mask,
    head_mask) — confirm against the upstream PABEE example.
    """

    def A_ ( self , snake_case , snake_case , snake_case=None , snake_case=None ):
        # Run only the layer selected by ``current_layer`` and return its
        # hidden states.
        UpperCAmelCase : Tuple = self.layer[current_layer](snake_case , snake_case , head_mask[current_layer] )
        UpperCAmelCase : Optional[int] = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BERT backbone with PABEE (Patience-based Early Exit) inference.

    During training every layer's classifier head is evaluated; during
    inference the model exits early once ``patience`` consecutive layers agree
    on the prediction (classification) or differ by less than a threshold
    (regression).

    NOTE(review): this file looks machine-mangled — many locals are rebound to
    ``UpperCAmelCase`` (so later reads such as ``encoder_batch_size``, ``res``
    or ``patient_result`` are undefined as written) and the forward signature
    repeats ``snake_case`` for every parameter, a SyntaxError. Comments below
    describe the apparent intent of the upstream PABEE implementation.
    """

    def __init__( self , snake_case ):
        # Build the PABEE-aware encoder and zero the early-exit bookkeeping
        # (patience, regression threshold, cumulative layer/instance counters).
        super().__init__(snake_case )
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self , snake_case ):
        # Setter: absolute-difference threshold used for regression early exit.
        UpperCAmelCase : List[Any] = threshold

    def A_ ( self , snake_case ):
        # Setter: number of consecutive agreeing layers required to exit.
        UpperCAmelCase : str = patience

    def A_ ( self ):
        # Reset the inference statistics counters.
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self ):
        # Report the average number of layers used per inference instance and
        # the implied speed-up versus running all layers.
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        """Forward pass with optional patience-based early exit.

        NOTE(review): the duplicated parameters presumably correspond to the
        standard BERT forward arguments (input_ids, attention_mask,
        token_type_ids, position_ids, head_mask, inputs_embeds,
        encoder_hidden_states, encoder_attention_mask, output_dropout,
        output_layers, regression) — confirm against upstream.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            # Training: run every layer and collect each internal classifier's
            # logits so all heads receive gradient.
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0:  # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            # Patience-based early exit: stop once ``patience`` consecutive
            # layers agree (classification) or stay within the regression
            # threshold.
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            # Accumulate statistics used by the speed-up report above.
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE BERT with one classification/regression head per encoder layer.

    NOTE(review): machine-mangled like the rest of the file — locals are all
    rebound to ``UpperCAmelCase`` (so ``logits``/``outputs``/``total_loss``
    are undefined where read) and the forward signature repeats
    ``snake_case``, a SyntaxError. Comments describe the apparent upstream
    intent.
    """

    def __init__( self , snake_case ):
        # One linear classifier per hidden layer so any layer can exit early.
        super().__init__(snake_case )
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        """Forward pass; when labels are given, each layer's loss is weighted
        proportionally to its depth (layer i contributes weight i + 1)."""
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    # Deeper layers carry proportionally more weight.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
import torch
from torch import nn
class lowerCAmelCase_ ( nn.Module ):
    """Projected adaptive log-softmax (Transformer-XL style): the vocabulary
    is split into a frequent "head" cluster plus tail clusters that are
    evaluated at reduced embedding width.

    NOTE(review): mangled source — ``__init__`` and both ``snake_case``
    methods repeat the parameter name ``snake_case__`` (a SyntaxError in
    Python) and locals are bound to the throwaway ``SCREAMING_SNAKE_CASE_``,
    so reads such as ``n_token`` or ``cutoffs`` cannot resolve as written.
    Comments describe the apparent upstream (transfo-xl) intent.
    """

    def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=1 ,snake_case__=False ):
        # Presumed parameters: (n_token, d_embed, d_proj, cutoffs, div_val,
        # keep_order) — TODO confirm against upstream.
        super().__init__()
        SCREAMING_SNAKE_CASE_ : Any = n_token
        SCREAMING_SNAKE_CASE_ : Tuple = d_embed
        SCREAMING_SNAKE_CASE_ : int = d_proj
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = cutoffs + [n_token]
        SCREAMING_SNAKE_CASE_ : Dict = [0] + self.cutoffs
        SCREAMING_SNAKE_CASE_ : int = div_val
        SCREAMING_SNAKE_CASE_ : Tuple = self.cutoffs[0]
        SCREAMING_SNAKE_CASE_ : str = len(self.cutoffs ) - 1
        SCREAMING_SNAKE_CASE_ : Any = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            # Extra "cluster" logits appended to the head softmax.
            SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
            SCREAMING_SNAKE_CASE_ : int = nn.Parameter(torch.zeros(self.n_clusters ) )
        SCREAMING_SNAKE_CASE_ : int = nn.ModuleList()
        SCREAMING_SNAKE_CASE_ : List[str] = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ ,snake_case__ ) ) )
                else:
                    self.out_projs.append(snake_case__ )
                self.out_layers.append(nn.Linear(snake_case__ ,snake_case__ ) )
        else:
            # div_val > 1: each successive cluster uses a smaller embedding
            # width (d_embed // div_val**i) plus a projection back to d_proj.
            for i in range(len(self.cutoffs ) ):
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                SCREAMING_SNAKE_CASE_ : List[str] = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ ,snake_case__ ) ) )
                self.out_layers.append(nn.Linear(snake_case__ ,r_idx - l_idx ) )
        SCREAMING_SNAKE_CASE_ : Any = keep_order

    def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
        # Compute logits: hidden @ proj (optional) then linear(weight, bias).
        # Presumed parameters: (hidden, weight, bias, proj).
        if proj is None:
            SCREAMING_SNAKE_CASE_ : List[str] = nn.functional.linear(snake_case__ ,snake_case__ ,bias=snake_case__ )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            SCREAMING_SNAKE_CASE_ : int = nn.functional.linear(snake_case__ ,proj.t().contiguous() )
            SCREAMING_SNAKE_CASE_ : Any = nn.functional.linear(snake_case__ ,snake_case__ ,bias=snake_case__ )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def snake_case ( self ,snake_case__ ,snake_case__=None ,snake_case__=False ):
        """Return per-token negative log-likelihood (when labels are given) or
        the full log-probability matrix. Presumed parameters:
        (hidden, labels, keep_order)."""
        if labels is not None:
            # Shift so that tokens < n predict n
            SCREAMING_SNAKE_CASE_ : Dict = hidden[..., :-1, :].contiguous()
            SCREAMING_SNAKE_CASE_ : Optional[int] = labels[..., 1:].contiguous()
            SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden.view(-1 ,hidden.size(-1 ) )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            SCREAMING_SNAKE_CASE_ : List[Any] = hidden.view(-1 ,hidden.size(-1 ) )
        if self.n_clusters == 0:
            # Single-cluster fast path: plain (projected) softmax over the
            # whole vocabulary, ignoring -100 labels.
            SCREAMING_SNAKE_CASE_ : str = self._compute_logit(snake_case__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
            if labels is not None:
                SCREAMING_SNAKE_CASE_ : Tuple = labels != -100
                SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros_like(snake_case__ ,dtype=hidden.dtype ,device=hidden.device )
                SCREAMING_SNAKE_CASE_ : int = (
                    -nn.functional.log_softmax(snake_case__ ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.functional.log_softmax(snake_case__ ,dim=-1 )
        else:
            # construct weights and biases
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    SCREAMING_SNAKE_CASE_ : Any = self.out_layers[0].weight[l_idx:r_idx]
                    SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    SCREAMING_SNAKE_CASE_ : List[Any] = self.out_layers[i].weight
                    SCREAMING_SNAKE_CASE_ : Optional[Any] = self.out_layers[i].bias
                if i == 0:
                    # Head cluster also predicts the tail-cluster pseudo-words.
                    SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
                    SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
                weights.append(snake_case__ )
                biases.append(snake_case__ )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = weights[0], biases[0], self.out_projs[0]
            SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.functional.log_softmax(snake_case__ ,dim=1 )
            if labels is None:
                SCREAMING_SNAKE_CASE_ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros_like(snake_case__ ,dtype=hidden.dtype ,device=hidden.device )
            SCREAMING_SNAKE_CASE_ : List[str] = 0
            SCREAMING_SNAKE_CASE_ : str = [0] + self.cutoffs
            for i in range(len(snake_case__ ) - 1 ):
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Select only the rows whose target falls in this cluster.
                    SCREAMING_SNAKE_CASE_ : List[str] = (labels >= l_idx) & (labels < r_idx)
                    SCREAMING_SNAKE_CASE_ : Tuple = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    SCREAMING_SNAKE_CASE_ : Any = labels.index_select(0 ,snake_case__ ) - l_idx
                    SCREAMING_SNAKE_CASE_ : List[str] = head_logprob.index_select(0 ,snake_case__ )
                    SCREAMING_SNAKE_CASE_ : Any = hidden.index_select(0 ,snake_case__ )
                else:
                    SCREAMING_SNAKE_CASE_ : Any = hidden
                if i == 0:
                    if labels is not None:
                        SCREAMING_SNAKE_CASE_ : List[str] = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
                    else:
                        SCREAMING_SNAKE_CASE_ : Any = head_logprob[:, : self.cutoffs[0]]
                else:
                    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = weights[i], biases[i], self.out_projs[i]
                    SCREAMING_SNAKE_CASE_ : int = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
                    SCREAMING_SNAKE_CASE_ : List[str] = nn.functional.log_softmax(snake_case__ ,dim=1 )
                    SCREAMING_SNAKE_CASE_ : Optional[int] = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        # Chain rule: cluster log-prob + within-cluster log-prob.
                        SCREAMING_SNAKE_CASE_ : List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 ,target_i[:, None] ).squeeze(1 )
                    else:
                        SCREAMING_SNAKE_CASE_ : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        SCREAMING_SNAKE_CASE_ : Dict = logprob_i
                if labels is not None:
                    if (hasattr(self ,'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 ,snake_case__ ,-logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def snake_case ( self ,snake_case__ ):
        """Return full log-probabilities over the vocabulary (no labels).
        Presumed parameter: (hidden)."""
        if self.n_clusters == 0:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._compute_logit(snake_case__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
            return nn.functional.log_softmax(snake_case__ ,dim=-1 )
        else:
            # construct weights and biases
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
                    SCREAMING_SNAKE_CASE_ : int = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    SCREAMING_SNAKE_CASE_ : Any = self.out_layers[i].weight
                    SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.out_layers[i].bias
                if i == 0:
                    SCREAMING_SNAKE_CASE_ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
                    SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
                weights.append(snake_case__ )
                biases.append(snake_case__ )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = weights[0], biases[0], self.out_projs[0]
            SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
            SCREAMING_SNAKE_CASE_ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            SCREAMING_SNAKE_CASE_ : str = nn.functional.log_softmax(snake_case__ ,dim=1 )
            SCREAMING_SNAKE_CASE_ : Any = [0] + self.cutoffs
            for i in range(len(snake_case__ ) - 1 ):
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    SCREAMING_SNAKE_CASE_ : List[Any] = head_logprob[:, : self.cutoffs[0]]
                else:
                    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = weights[i], biases[i], self.out_projs[i]
                    SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
                    SCREAMING_SNAKE_CASE_ : int = nn.functional.log_softmax(snake_case__ ,dim=1 )
                    SCREAMING_SNAKE_CASE_ : Any = head_logprob[:, -i] + tail_logprob_i
                    SCREAMING_SNAKE_CASE_ : Dict = logprob_i
            return out
| 105 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def lowercase ( __magic_name__ ):
    """Gaussian Error Linear Unit via the exact erf formulation:
    x * 0.5 * (1 + erf(x / sqrt(2)))."""
    # Bug fix: the results were bound to a throwaway name, leaving ``x`` and
    # ``cdf`` undefined at their use sites; bind them explicitly.
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """Tanh-approximated GELU ("gelu_new"):
    x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))."""
    # Bug fix: restore the local bindings (``x``, ``pi``, ``coeff``, ``cdf``)
    # that were all collapsed onto a throwaway name, and apply ``tf.pow`` to
    # the converted tensor.
    x = tf.convert_to_tensor(__magic_name__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """Mish activation: x * tanh(softplus(x))."""
    # Bug fix: the converted tensor was bound to a throwaway name, leaving
    # ``x`` undefined at its use site.
    x = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def lowercase ( __magic_name__ ):
    """Fast tanh approximation of GELU:
    0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x * x)))."""
    # Bug fix: the two coefficients were both written to a throwaway name and
    # then read back as a single ``coeffa``; bind them as distinct locals.
    x = tf.convert_to_tensor(__magic_name__ )
    coeff1 = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeff2 = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))
def lowercase ( __magic_name__ ):
    """QuickGELU: x * sigmoid(1.702 * x)."""
    # Bug fix: restore the ``x`` and ``coeff`` bindings that were collapsed
    # onto a throwaway name.
    x = tf.convert_to_tensor(__magic_name__ )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def lowercase ( __magic_name__ ):
    """Clip the GELU output to the range [-10, 10] ("gelu_10").

    NOTE(review): ``_gelu`` is not defined under that name in this mangled
    file (every activation here was renamed to ``lowercase``); restore the
    original helper name before use.
    """
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
def lowercase ( __magic_name__ , __magic_name__=-1 ):
    """Gated Linear Unit: split the tensor in two along an axis and gate one
    half with the sigmoid of the other.

    NOTE(review): both parameters share one name (a SyntaxError in Python);
    the intended signature is presumably (x, axis=-1), and the halves of the
    split were meant to be bound to distinct locals.
    """
    UpperCAmelCase , UpperCAmelCase : Dict = tf.split(__magic_name__ , 2 , axis=__magic_name__ )
    return a * tf.math.sigmoid(__magic_name__ )
# Prefer the native Keras GELU when TF >= 2.4; otherwise fall back to the
# hand-rolled implementations defined above.
# NOTE(review): in this mangled copy the names assigned/read below
# (approximate_gelu_wrap, _gelu, _gelu_new) are never defined under those
# identifiers, and the repeated assignments to ``a`` overwrite each other.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( __magic_name__ ):
        """Approximate (tanh) GELU wrapper dispatching to Keras."""
        # NOTE(review): upstream passes approximate=True here, not the input
        # tensor — confirm and fix.
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )

    a : Tuple = tf.keras.activations.gelu
    a : Dict = approximate_gelu_wrap
else:
    a : List[str] = _gelu
    a : List[Any] = _gelu_new
# String-name -> activation-callable registry (upstream "ACT2FN").
# NOTE(review): assigned to ``a`` here, and the values (gelu, gelu_aa,
# gelu_fast, ...) are never defined under those names in this mangled file.
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
    """Look up a TF activation function by its string name.

    Raises:
        KeyError: if the name is not registered.

    NOTE(review): the body reads ``activation_string`` (the parameter is named
    ``__magic_name__``) and ``ACTaFN`` (the registry above is bound to ``a``)
    — both undefined as written in this mangled copy.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 679 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for BarkProcessor: save/load round-trips, voice presets, and
    tokenizer parity.

    NOTE(review): mangled source — the fixture values are bound to the
    throwaway local ``A`` instead of ``self.checkpoint`` etc., every method
    shares the name ``__UpperCamelCase`` (later definitions shadow earlier
    ones, and unittest will not collect them), and the last line has table
    residue ("| 106 |") appended, which is a syntax error.
    """

    def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        # Presumably setUp: checkpoint id, scratch dir, voice-preset name,
        # sample text, and speaker-embedding paths.
        A = 'ylacombe/bark-small'
        A = tempfile.mkdtemp()
        A = 'en_speaker_1'
        A = 'This is a test string'
        A = 'speaker_embeddings_path.json'
        A = 'speaker_embeddings'

    def __UpperCamelCase ( self : List[str] , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
        # Helper: tokenizer for the checkpoint, forwarding extra kwargs.
        return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCamelCase )

    def __UpperCamelCase ( self : str ) -> Union[str, Any]:
        # Presumably tearDown: remove the scratch directory.
        shutil.rmtree(self.tmpdirname )

    def __UpperCamelCase ( self : int ) -> List[str]:
        # Save a default processor and reload it; vocabularies must match.
        A = self.get_tokenizer()
        A = BarkProcessor(tokenizer=__UpperCamelCase )
        processor.save_pretrained(self.tmpdirname )
        A = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def __UpperCamelCase ( self : Tuple ) -> int:
        # Round-trip with speaker embeddings and custom special tokens.
        A = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        A = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def __UpperCamelCase ( self : int ) -> List[str]:
        # Voice presets: dict input, npz file on disk, and hub lookup.
        A = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        A = 35
        A = 2
        A = 8
        A = {
            'semantic_prompt': np.ones(__UpperCamelCase ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        A = processor(text=self.input_string , voice_preset=__UpperCamelCase )
        A = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCamelCase , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        A = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(__UpperCamelCase , **__UpperCamelCase )
        A = processor(text=self.input_string , voice_preset=__UpperCamelCase )
        A = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCamelCase , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        A = processor(text=self.input_string , voice_preset=self.voice_preset )

    def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
        # Processor text encoding must match direct tokenizer output.
        A = self.get_tokenizer()
        A = BarkProcessor(tokenizer=__UpperCamelCase )
        A = processor(text=self.input_string )
        A = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 106 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
    """N-th order IIR digital filter (direct-form difference equation).

    NOTE(review): mangled source — every local/attribute is bound to
    ``UpperCAmelCase`` (so ``self.order``, ``self.a_coeffs`` etc. are never
    actually set) and the coefficient setter repeats ``snake_case`` in its
    signature, a SyntaxError. Comments describe the apparent intent.
    """

    def __init__( self , snake_case ):
        """Create an identity filter of the given order."""
        UpperCAmelCase : str = order
        # a_{0} ... a_{k}
        UpperCAmelCase : Optional[int] = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        UpperCAmelCase : List[Any] = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        UpperCAmelCase : Dict = [0.0] * self.order
        # y[n-1] ... y[n-k]
        UpperCAmelCase : Optional[Any] = [0.0] * self.order

    def A_ ( self , snake_case , snake_case ):
        """Install denominator (a) and numerator (b) coefficient lists; an
        implicit a_0 = 1.0 is prepended when the a-list is short."""
        if len(snake_case ) < self.order:
            UpperCAmelCase : Dict = [1.0, *a_coeffs]
        if len(snake_case ) != self.order + 1:
            UpperCAmelCase : Optional[Any] = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(snake_case )}"
            )
            raise ValueError(snake_case )
        if len(snake_case ) != self.order + 1:
            UpperCAmelCase : Optional[Any] = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(snake_case )}"
            )
            raise ValueError(snake_case )
        UpperCAmelCase : Optional[int] = a_coeffs
        UpperCAmelCase : Optional[Any] = b_coeffs

    def A_ ( self , snake_case ):
        """Process one sample through the difference equation and return y[n]."""
        UpperCAmelCase : Optional[Any] = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        UpperCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the delay lines and record the newest input/output.
        UpperCAmelCase : List[str] = self.input_history[:-1]
        UpperCAmelCase : List[Any] = self.output_history[:-1]
        UpperCAmelCase : str = sample
        UpperCAmelCase : str = result
        return result
| 679 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowercase_ ( unittest.TestCase , _UpperCamelCase ):
    """Integration tests for the `text-classification` tool (local and remote).

    NOTE(review): every method below is named ``__UpperCAmelCase`` (later
    definitions shadow earlier ones, and unittest will not collect them), and
    the first method assigns the loaded tools to the throwaway local ``_A``
    instead of ``self.tool`` / ``self.remote_tool``, so the later methods
    cannot work as written.
    """

    def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Presumably setUp: load the local and remote tool instances.
        _A = load_tool('text-classification' )
        self.tool.setup()
        _A = load_tool('text-classification', remote=UpperCamelCase__ )

    def __UpperCAmelCase ( self : Any ) -> List[str]:
        # Positional-argument call on the local tool.
        _A = self.tool('That\'s quite cool', ['positive', 'negative'] )
        self.assertEqual(UpperCamelCase__, 'positive' )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        # Positional-argument call on the remote tool.
        _A = self.remote_tool('That\'s quite cool', ['positive', 'negative'] )
        self.assertEqual(UpperCamelCase__, 'positive' )

    def __UpperCAmelCase ( self : List[Any] ) -> str:
        # Keyword-argument call on the local tool.
        _A = self.tool(text='That\'s quite cool', labels=['positive', 'negative'] )
        self.assertEqual(UpperCamelCase__, 'positive' )

    def __UpperCAmelCase ( self : Any ) -> List[str]:
        # Keyword-argument call on the remote tool.
        _A = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'] )
        self.assertEqual(UpperCamelCase__, 'positive' )
| 107 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__=None ):
'''simple docstring'''
if fail is not None:
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase : Any = None
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : int = defaultdict(__magic_name__ )
for line in correct_lines:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
a : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 679 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__a: Optional[int] = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in
    ``[0, scale)``.

    `rng` defaults to the module-level `global_rng`; `name` is accepted for
    API compatibility and unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for _ in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


# Backwards-compatible alias for the previous placeholder name.
_SCREAMING_SNAKE_CASE = floats_list
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Builds extractor kwargs and dummy waveform batches for the
    Speech2Text feature-extractor tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of dummy float inputs, optionally equal-length
        and/or converted to numpy arrays."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for the Speech2Text feature extractor: batching, CMVN
    normalization, truncation/padding, dtype handling and a reference
    integration check."""

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        # The mixin and every test below read `self.feat_extract_tester`;
        # the original bound the tester to a throwaway local.
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-feature mean ~0 and variance ~1 (CMVN applied)."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            # beyond the real length the features must be (near-)zero padding
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            # float64 inputs must be down-cast to float32 by `pad`
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Return `num_samples` decoded audio arrays from the dummy
        LibriSpeech dataset."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected_input_features = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # compare exactly as many leading values as we have reference data for
        self.assertTrue(
            np.allclose(input_features[0, 0, : expected_input_features.shape[0]], expected_input_features, atol=1e-4)
        )
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node holding `data` coins."""

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so every node holds exactly one
    coin (LeetCode 979); one move transfers one coin along one edge.

    Raises:
        ValueError: if the total coin count differs from the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Each subtree must end up with one coin per node; the excess
        # (positive or negative) crosses the edge to this node.
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


# Backwards-compatible aliases for the previous placeholder names.
UpperCamelCase__ = TreeNode
a = CoinsDistribResult
lowercase = distribute_coins
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 679 | 0 |
'''simple docstring'''
def jaro_winkler(str_a: str, str_b: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    Jaro similarity counts characters matching within a sliding window of
    half the shorter string's length, discounts transpositions, then the
    Winkler bonus rewards a common prefix of up to 4 characters.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, ch in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if ch in _strb[left:right]:
                matched.append(ch)
                # blank out the matched character so it cannot match twice
                _strb = f"{_strb[0:_strb.index(ch)]} {_strb[_strb.index(ch) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the doctests, then print a quick demonstration value.
    import doctest
    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 109 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level tokenizer resources.  The class below references these by
# name; the original bound all four to the same throwaway variable `a`,
# leaving every reference unresolved.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum input sizes (in positions) keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}

# Backwards-compatible alias for the previous placeholder name.
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE, `tokenizers`-backed) tokenizer for LED.

    Mirrors `BartTokenizerFast`, plus `_pad` support for padding the
    LED-specific `global_attention_mask` alongside the regular inputs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer when `add_prefix_space` differs
        # from the serialized state.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token; logs an error and returns None if it is unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a word-initial token (strip the
        # space on its left) so `<mask>` matches mid-sentence usage.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """`<s> A </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like BART/RoBERTa) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad like the base class, then pad `global_attention_mask` (with
        `-1`) to the length of the first model input."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs


# Backwards-compatible alias for the previous placeholder name.
UpperCamelCase__ = LEDTokenizerFast
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the PoolFormer model package.  The original
# bound the structure dict and the `_LazyModule` to throwaway names, so the
# dict was never extended and the lazy module was never installed.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Return a unique file path (ending in `suffix`) inside a fresh
    temporary directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


# Backwards-compatible alias for the previous placeholder name.
lowercase = get_new_path
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Round-trip tests for `AgentAudio` (tensor <-> serialized .wav path)."""

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """Tests for `AgentImage` built from a tensor, a path, or a PIL image."""

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        # Built from a path: serialization must reuse that same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # Built from a PIL image: serialization goes to a new temp file.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """`AgentText` should behave exactly like the underlying string."""

    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
| 679 | 0 |
def solution() -> int:
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...

    >>> solution()
    210
    """
    digits = []
    total_len = 0
    i = 1
    # Stop once at least one million digits exist.  The original looped
    # until one million whole numbers were appended (generating ~7x the
    # digits needed) and crashed on an undefined placeholder name.
    while total_len < 1_000_000:
        s = str(i)
        digits.append(s)
        total_len += len(s)
        i += 1
    constant = "".join(digits)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


# Backwards-compatible alias for the previous placeholder name.
__a = solution


if __name__ == "__main__":
    print(solution())
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
    """Convert a TensorFlow "Token Dropping" BERT checkpoint to a PyTorch ``BertForMaskedLM``.

    Positional arguments (all collapsed to ``__magic_name__``) appear to be, in
    order: the TF checkpoint path, the BERT config JSON path, and the output
    PyTorch dump path — TODO confirm against the argparse block below.

    NOTE(review): every local is named ``UpperCAmelCase`` and every parameter
    ``__magic_name__`` (collapsed identifiers).  As written, each assignment
    discards the previous value, the helper bodies reference unbound names
    (``name``, ``array``, ``layer_index``), and the loaded tensors are never
    attached to the model — the distinct names must be restored before this
    function can work.
    """

    # Load one variable from the `masked_lm` scope of the TF checkpoint;
    # kernel arrays are transposed before conversion to a torch tensor.
    def get_masked_lm_array(__magic_name__ ):
        UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )

    # Load one variable from the top-level `encoder` scope.
    def get_encoder_array(__magic_name__ ):
        UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )

    # Load one variable belonging to a specific transformer layer.
    def get_encoder_layer_array(__magic_name__ , __magic_name__ ):
        UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[int] = array.transpose()
        return torch.from_numpy(__magic_name__ )

    # Load an attention variable of a specific layer and reshape it to the
    # target torch parameter shape (TF stores per-head dimensions).
    def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ):
        UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        UpperCAmelCase : int = array.reshape(__magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[Any] = array.transpose()
        return torch.from_numpy(__magic_name__ )

    print(F"Loading model based on config from {config_path}..." )
    UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ )
    UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ )

    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        UpperCAmelCase : BertSelfAttention = layer.attention.self
        UpperCAmelCase : List[Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape )
        UpperCAmelCase : int = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape )
        # Self-attention Output
        UpperCAmelCase : BertSelfOutput = layer.attention.output
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape )
        UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" )
        # Intermediate
        UpperCAmelCase : BertIntermediate = layer.intermediate
        UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" )
        UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" )
        # Output
        UpperCAmelCase : BertOutput = layer.output
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" )
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" )
        UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" )

    # Embeddings
    UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" )
    UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" )
    UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" )
    UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" )

    # LM Head
    UpperCAmelCase : str = model.cls.predictions.transform
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" )
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
    UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" )

    # Pooling
    UpperCAmelCase : str = BertPooler(config=__magic_name__ )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" )

    # Export final model
    model.save_pretrained(__magic_name__ )

    # Integration test - should load without any errors ;)
    UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ )
    print(new_model.eval() )
    print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
    # NOTE(review): reconstructed — the original bound the parser and the parsed
    # args to throwaway names (`a : Tuple = ...`) and then called an undefined
    # `convert_checkpoint_to_pytorch`; the conversion entry point defined above
    # is named `lowercase`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    lowercase(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowercase : Dict = get_logger(__name__)
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0 ):
    """Save an FSDP-wrapped model's state dict under ``output_dir``.

    The five positional parameters (all collapsed to ``lowerCamelCase_``) appear
    to be: accelerator, fsdp_plugin, model, output_dir, model_index=0 — TODO
    confirm against callers.  The save layout depends on
    ``fsdp_plugin.state_dict_type`` (full / local per-rank / sharded).

    NOTE(review): locals were collapsed to ``A`` and parameters to
    ``lowerCamelCase_``; names referenced below (``state_dict``,
    ``output_model_file``, ``ckpt_dir``, ``model_index``, ``accelerator``) are
    never bound, so this function cannot run as written.
    """
    os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
    with FSDP.state_dict_type(
        lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        A : str = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Full state dict: only rank 0 writes a single consolidated file.
            A : Optional[int] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            A : Dict = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            if accelerator.process_index == 0:
                logger.info(F'Saving model to {output_model_file}' )
                torch.save(lowerCamelCase_ , lowerCamelCase_ )
                logger.info(F'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Local state dict: every rank writes its own shard file.
            A : Union[str, Any] = (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            A : Optional[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            logger.info(F'Saving model to {output_model_file}' )
            torch.save(lowerCamelCase_ , lowerCamelCase_ )
            logger.info(F'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Sharded state dict: use torch.distributed.checkpoint writers.
            A : Union[str, Any] = os.path.join(lowerCamelCase_ , F'{MODEL_NAME}_{model_index}' )
            os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
            logger.info(F'Saving model to {ckpt_dir}' )
            A : str = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=lowerCamelCase_ , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase_ ) , planner=DefaultSavePlanner() , )
            logger.info(F'Model saved to {ckpt_dir}' )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0 ):
    """Load an FSDP model state dict previously saved by the companion save helper.

    Parameters (collapsed to ``lowerCamelCase_``) appear to be: accelerator,
    fsdp_plugin, model, input_dir, model_index=0 — TODO confirm against callers.
    The load path mirrors ``fsdp_plugin.state_dict_type``.

    NOTE(review): collapsed identifiers — ``input_model_file``, ``ckpt_dir``,
    ``state_dict``, ``model_index``, ``input_dir`` are referenced but never
    bound; the loaded tensors are never passed to ``model.load_state_dict``
    (it receives a parameter instead).  Restore distinct names before use.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(lowerCamelCase_ ) != FSDP and accelerator.process_index != 0:
                # Non-FSDP object on a non-zero rank: states must be broadcast
                # from rank 0, which requires `sync_module_states=True`.
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
                        '''initializing FSDP object''' )
                return
            A : List[Any] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            A : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            logger.info(F'Loading model from {input_model_file}' )
            A : List[str] = torch.load(lowerCamelCase_ )
            logger.info(F'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Per-rank shard file written by the local-state-dict save path.
            A : str = (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            A : str = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            logger.info(F'Loading model from {input_model_file}' )
            A : List[Any] = torch.load(lowerCamelCase_ )
            logger.info(F'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Sharded checkpoint directory read via torch.distributed.checkpoint.
            A : List[str] = (
                os.path.join(lowerCamelCase_ , F'{MODEL_NAME}_{model_index}' )
                if F'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading model from {ckpt_dir}' )
            A : Tuple = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=lowerCamelCase_ , storage_reader=dist_cp.FileSystemReader(lowerCamelCase_ ) , planner=DefaultLoadPlanner() , )
            A : Tuple = state_dict["model"]
            logger.info(F'Model loaded from {ckpt_dir}' )
        model.load_state_dict(lowerCamelCase_ )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0 ):
    """Save an FSDP optimizer state dict under ``output_dir``.

    Parameters (collapsed to ``lowerCamelCase_``) appear to be: accelerator,
    fsdp_plugin, optimizer, model, output_dir, optimizer_index=0 — TODO confirm.

    NOTE(review): collapsed identifiers — ``optimizer_index``,
    ``output_optimizer_file``, ``ckpt_dir``, ``optim_state`` and
    ``accelerator`` are referenced but never bound as written.
    """
    os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
    with FSDP.state_dict_type(
        lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        A : List[str] = FSDP.optim_state_dict(lowerCamelCase_ , lowerCamelCase_ )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Full optimizer state: only rank 0 writes one consolidated file.
            if accelerator.process_index == 0:
                A : List[str] = (
                    F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                A : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
                logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
                torch.save(lowerCamelCase_ , lowerCamelCase_ )
                logger.info(F'Optimizer state saved in {output_optimizer_file}' )
        else:
            # Sharded / local: write via torch.distributed.checkpoint.
            A : int = os.path.join(lowerCamelCase_ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
            os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
            logger.info(F'Saving Optimizer state to {ckpt_dir}' )
            dist_cp.save_state_dict(
                state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase_ ) , planner=DefaultSavePlanner() , )
            logger.info(F'Optimizer state saved in {ckpt_dir}' )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0 ):
    """Load an FSDP optimizer state dict saved by the companion save helper.

    Parameters (collapsed to ``lowerCamelCase_``) appear to be: accelerator,
    fsdp_plugin, optimizer, model, input_dir, optimizer_index=0 — TODO confirm.

    NOTE(review): collapsed identifiers — ``optimizer_index``,
    ``input_optimizer_file``, ``ckpt_dir``, ``optim_state``, ``input_dir``,
    ``optimizer`` are referenced but never bound as written; the flattened
    state is never actually passed to ``optimizer.load_state_dict``.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            A : Tuple = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            A : int = (
                F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            A : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
            A : Optional[Any] = torch.load(lowerCamelCase_ )
            logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
        else:
            # Sharded checkpoint: read with the sharded-optimizer helper so the
            # state matches the model's current parameter sharding.
            A : Any = (
                os.path.join(lowerCamelCase_ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
                if F'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading Optimizer from {ckpt_dir}' )
            A : Dict = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(lowerCamelCase_ ) , )
            A : Union[str, Any] = optim_state["optimizer"]
            logger.info(F'Optimizer loaded from {ckpt_dir}' )
        A : Optional[Any] = FSDP.optim_state_dict_to_load(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        optimizer.load_state_dict(lowerCamelCase_ )
| 542 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def find_backend(line):
    """Return the sorted ``_and_``-joined backend names guarded on *line*, or None.

    NOTE(review): renamed from ``lowercase`` — all call sites below use
    ``find_backend``, and repeated ``def lowercase`` definitions shadowed
    one another.
    """
    # Not an `if not is_xxx_available()` guard line at all.
    if _re_test_backend.search(line) is None:
        return None
    backends = sorted(b[0] for b in _re_backend.findall(line))
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read ``init_file`` and parse, per backend, the objects declared in
    ``_import_structure`` and the objects declared under ``TYPE_CHECKING``.
    Returns ``(import_dict_objects, type_hint_objects)`` — two dicts keyed by
    backend name (``"none"`` for backend-free objects) — or ``None`` when the
    init has no ``_import_structure`` (a "traditional" init).

    NOTE(review): reconstructed — the original was defined as ``lowercase``
    with every local collapsed to ``UpperCAmelCase``/``__magic_name__`` and so
    could not run; call sites use ``parse_init``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    ``import_dict_objects`` / ``type_hint_objects`` are the per-backend dicts
    produced by ``parse_init``.  Reports backend-set mismatches, duplicate
    declarations, and objects present in one half but not the other.

    NOTE(review): renamed from ``lowercase`` — the call site uses
    ``analyze_results``.
    """

    # Elements that appear more than once in a list.
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        # Same objects (order/duplicates aside) must appear on both sides.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk ``PATH_TO_TRANSFORMERS`` and verify every ``__init__.py`` keeps its
    ``_import_structure`` and ``TYPE_CHECKING`` halves in sync; raise
    ``ValueError`` describing every mismatching init.

    NOTE(review): reconstructed — the original walked an undefined
    ``__magic_name__`` and bound every local to ``UpperCAmelCase``; call sites
    use ``check_all_inits``.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules (dotted names) found on disk
    under ``PATH_TO_TRANSFORMERS``.

    NOTE(review): reconstructed — the original walked an undefined
    ``__magic_name__`` with collapsed locals; call sites use
    ``get_transformers_submodules``.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only top-level .py files count as submodules here.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main `_import_structure`.
# NOTE(review): restored name — this constant was bound to `a` while
# `check_submodules` references `IGNORE_SUBMODULES`.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Verify every on-disk submodule is registered in the main transformers
    init's ``_import_structure``; raise ``ValueError`` listing the missing ones.

    NOTE(review): reconstructed — the original bound the spec and module to
    collapsed names; call sites use ``check_submodules``.
    """
    # Load the repo's own __init__.py as the `transformers` module so the
    # check runs against the checkout, not an installed package.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
# Script entry point: run both consistency checks (each raises ValueError on failure).
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 679 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __lowerCAmelCase(func):
    """Decorator: make the wrapped callable return its own wall-clock run time
    in seconds (the callable's real return value is discarded).

    NOTE(review): the original declared ``wrapper(*__snake_case, **__snake_case)``
    — a duplicate-argument SyntaxError — and the outer parameter was collapsed
    to ``__snake_case`` while the body referenced ``func``; both are repaired.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # Preserve the wrapped function's name for reporting.
    wrapper.__name__ = func.__name__
    return wrapper
# NOTE(review): the signature below repeats the parameter name `__snake_case`
# three times, which is a SyntaxError; the intended parameters appear to be
# (features, num_examples=100, seq_shapes=None) — TODO confirm.  Locals are
# likewise collapsed, so `features`, `dummy_data`, `seq_shapes`, `example`,
# `v` and `data` referenced in the body are never bound as written.
def __lowerCAmelCase ( __snake_case , __snake_case=100 , __snake_case=None ):
    """Generate random dummy examples matching a ``datasets`` features mapping
    (arrays, values, and sequences), returning ``(index, example)`` pairs."""
    __lowerCAmelCase = []
    __lowerCAmelCase = seq_shapes or {}
    for i in range(__snake_case ):
        __lowerCAmelCase = {}
        for col_id, (k, v) in enumerate(features.items() ):
            # Fixed-shape array feature: random floats cast to the declared dtype.
            if isinstance(__snake_case , _ArrayXD ):
                __lowerCAmelCase = np.random.rand(*v.shape ).astype(v.dtype )
            # Scalar value feature: fixed sentence for strings, random int otherwise.
            elif isinstance(__snake_case , datasets.Value ):
                if v.dtype == "string":
                    __lowerCAmelCase = "The small grey turtle was surprisingly fast when challenged."
                else:
                    __lowerCAmelCase = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            # (Possibly nested) sequence feature: unwrap to the leaf feature and
            # draw a random array of the externally supplied shape.
            elif isinstance(__snake_case , datasets.Sequence ):
                while isinstance(__snake_case , datasets.Sequence ):
                    __lowerCAmelCase = v.feature
                __lowerCAmelCase = seq_shapes[k]
                __lowerCAmelCase = np.random.rand(*__snake_case ).astype(v.dtype )
        __lowerCAmelCase = data
        dummy_data.append((i, example) )

    return dummy_data
# NOTE(review): duplicate parameter names (`__snake_case` four times) make this
# a SyntaxError; the intended parameters appear to be
# (dataset_path, features, num_examples=100, seq_shapes=None) — TODO confirm.
# Body locals are collapsed too: `dummy_data`, `features`, `num_final_examples`,
# `num_examples` and `dataset` are never bound as written.
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case=100 , __snake_case=None ):
    """Write randomly generated examples to an Arrow file and load them back
    as a ``datasets.Dataset``; raises ``ValueError`` if the writer reports a
    different example count than requested."""
    __lowerCAmelCase = generate_examples(__snake_case , num_examples=__snake_case , seq_shapes=__snake_case )
    with ArrowWriter(features=__snake_case , path=__snake_case ) as writer:
        for key, record in dummy_data:
            __lowerCAmelCase = features.encode_example(__snake_case )
            writer.write(__snake_case )
        __lowerCAmelCase = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    __lowerCAmelCase = datasets.Dataset.from_file(filename=__snake_case , info=datasets.DatasetInfo(features=__snake_case ) )
    return dataset
| 367 |
'''simple docstring'''
import os
def solution():
    """Project Euler 18/67: maximum top-to-bottom path sum in the triangle
    stored in ``triangle.txt`` next to this script.

    NOTE(review): renamed from ``lowercase`` — the ``__main__`` guard calls
    ``solution()`` — and reconstructed: the original body referenced undefined
    names (``__magic_name__``, ``triangle``, ``a``) left by collapsed
    identifiers.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as in_file:
        triangle = in_file.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming top-down: each cell accumulates the best path
    # reaching it from its (up to two) parents in the previous row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 679 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__ ( _a : Dict ):
    """Build a FocalNet configuration from a checkpoint name such as
    ``focalnet-tiny-lrf`` (depth, embed dim, focal levels/windows, label maps).

    NOTE(review): the parameter and locals are collapsed (``_a``,
    ``snake_case_``): the body reads an unbound ``model_name`` and
    ``idalabel``, every computed value is discarded, and the final constructor
    call passes ``_a`` for every keyword (also ``idalabel``/``labelaid`` look
    like mangled ``id2label``/``label2id``) before returning an unbound
    ``config``.  Restore distinct names before use.
    """
    # Architecture depth/flags inferred from the checkpoint-name substrings.
    snake_case_ : int = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    snake_case_ : Union[str, Any] = True if "large" in model_name or "huge" in model_name else False
    snake_case_ : str = True if "large" in model_name or "huge" in model_name else False
    snake_case_ : Union[str, Any] = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            snake_case_ : Optional[int] = [3, 3, 3, 3]
            snake_case_ : Tuple = [5, 5, 5, 5]
        elif "fl4" in model_name:
            snake_case_ : List[str] = [4, 4, 4, 4]
            snake_case_ : Tuple = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        snake_case_ : str = [3, 3, 3, 3]
        if "lrf" in model_name:
            snake_case_ : int = [3, 3, 3, 3]
        else:
            snake_case_ : Optional[Any] = [2, 2, 2, 2]

    # Embedding dimension per model size.
    if "tiny" in model_name:
        snake_case_ : Dict = 96
    elif "small" in model_name:
        snake_case_ : int = 96
    elif "base" in model_name:
        snake_case_ : int = 1_28
    elif "large" in model_name:
        snake_case_ : Any = 1_92
    elif "xlarge" in model_name:
        snake_case_ : Tuple = 2_56
    elif "huge" in model_name:
        snake_case_ : List[Any] = 3_52

    # set label information
    snake_case_ : str = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        snake_case_ : List[str] = "imagenet-22k-id2label.json"
    else:
        snake_case_ : List[Any] = "imagenet-1k-id2label.json"

    snake_case_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type="dataset" ) , "r" ) )
    snake_case_ : Union[str, Any] = {int(_a ): v for k, v in idalabel.items()}
    snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}

    snake_case_ : Union[str, Any] = FocalNetConfig(
        embed_dim=_a , depths=_a , focal_levels=_a , focal_windows=_a , use_conv_embed=_a , idalabel=_a , labelaid=_a , use_post_layernorm=_a , use_layerscale=_a , )
    return config
def lowerCAmelCase__(name):
    """Map an original FocalNet checkpoint parameter name to its transformers
    equivalent (classifier head keeps no prefix; everything else gains
    ``focalnet.``).

    NOTE(review): fixed — the original assigned every ``.replace()`` result to a
    throwaway collapsed name (``snake_case_``), so none of the renames ever took
    effect; each result is now re-bound to ``name``.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def lowerCAmelCase__ ( _a : int , _a : int , _a : str=False ):
    """Download an original FocalNet checkpoint, convert it to a transformers
    ``FocalNetForImageClassification``, sanity-check logits on a COCO test
    image, and optionally save/push the result.

    Parameters (collapsed to ``_a``) appear to be:
    (model_name, pytorch_dump_folder_path, push_to_hub=False) — TODO confirm
    against the argparse block below.

    NOTE(review): locals are collapsed to ``snake_case_`` — the body reads
    unbound names (``model_name_to_url``, ``state_dict``, ``val``, ``model``,
    ``processor``, ``inputs``, ``outputs``, ...), calls an undefined
    ``get_focalnet_config``, and the two ``torch.allclose`` asserts compare
    against ``_a`` (a parameter) rather than the computed tensors.  Restore
    distinct names before use.
    """
    snake_case_ : Union[str, Any] = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    snake_case_ : Optional[Any] = model_name_to_url[model_name]
    print("Checkpoint URL: " , _a )
    snake_case_ : Optional[Any] = torch.hub.load_state_dict_from_url(_a , map_location="cpu" )["model"]

    # rename keys
    for key in state_dict.copy().keys():
        snake_case_ : Tuple = state_dict.pop(_a )
        snake_case_ : Union[str, Any] = val

    snake_case_ : Union[str, Any] = get_focalnet_config(_a )
    snake_case_ : List[str] = FocalNetForImageClassification(_a )
    model.eval()

    # load state dict
    model.load_state_dict(_a )

    # verify conversion
    snake_case_ : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : List[Any] = BitImageProcessor(
        do_resize=_a , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=_a , crop_size=2_24 , do_normalize=_a , image_mean=_a , image_std=_a , )
    snake_case_ : Dict = Image.open(requests.get(_a , stream=_a ).raw )
    snake_case_ : Union[str, Any] = processor(images=_a , return_tensors="pt" )

    # Reference torchvision pipeline to cross-check the image processor.
    snake_case_ : Dict = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )

    snake_case_ : Tuple = image_transforms(_a ).unsqueeze(0 )

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _a , atol=1E-4 )

    snake_case_ : int = model(**_a )
    snake_case_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )

    print("First values of logits:" , outputs.logits[0, :3] )

    # Expected logit slices per checkpoint (regression values).
    if model_name == "focalnet-tiny":
        snake_case_ : Dict = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
    elif model_name == "focalnet-tiny-lrf":
        snake_case_ : List[Any] = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
    elif model_name == "focalnet-small":
        snake_case_ : List[str] = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
    elif model_name == "focalnet-small-lrf":
        snake_case_ : Dict = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
    elif model_name == "focalnet-base":
        snake_case_ : Union[str, Any] = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
    elif model_name == "focalnet-base-lrf":
        snake_case_ : int = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
    assert torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_a )
        processor.save_pretrained(_a )

    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): reconstructed — the original bound the parser and the parsed
    # args to throwaway names (`lowercase : str = ...`) and then called an
    # undefined `convert_focalnet_checkpoint`; the conversion entry point
    # defined above is named `lowerCAmelCase__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    lowerCAmelCase__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 568 |
'''simple docstring'''
def fibonacci(n):
    """Return a Fibonacci number with F(2) = 1, F(3) = 2, F(4) = 3, ...

    Mirrors the original contract: ``n == 1`` and any non-int input return 0.

    BUG FIXES:
    - the guard previously read ``isinstance(n, n)``, which raises TypeError
      for every integer other than 1; the intended check is against ``int``;
    - the function was defined under a mangled name while the caller below
      (see the digit-index helper) invokes ``fibonacci``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence bottom-up; sequence[i] holds the i-th value.
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with at least ``n`` digits.

    The scan starts at index 3 (first value inspected is F(3) = 2), matching
    the original loop, so the answer for ``n == 1`` is 3.

    BUG FIXES:
    - the original called ``fibonacci(index)``, a name this file never
      defines (the helper was mangled), raising NameError;
    - recomputing the whole sequence on every iteration was O(n^2); the
      running pair below advances in O(1) per step.
    """
    index = 2
    digits = 0
    # Running pair (F(index), F(index + 1)); before the loop index == 2.
    fib, fib_next = 1, 2
    while digits < n:
        index += 1
        fib, fib_next = fib_next, fib + fib_next
        digits = len(str(fib))
    return index
def solution(n = 1000):
    """Project Euler 25 entry point: index of the first Fibonacci number
    with ``n`` (default 1000) digits.

    BUG FIX: the function was defined under a mangled name while the
    ``__main__`` guard below calls ``solution``; the name is restored.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 679 | 0 |
'''simple docstring'''
import os
def solution(path=None):
    """Project Euler 11: greatest product of four adjacent numbers in a
    20x20 grid, in any direction (right, down, or either diagonal).

    Args:
        path: optional path to the grid file; defaults to ``grid.txt`` next
            to this module. Each of the 20 lines holds 20 integers.

    BUG FIXES: the original anchored the file with
    ``os.path.dirname(UpperCAmelCase)`` (an undefined name) and the
    ``__main__`` guard called ``solution`` while the function carried a
    mangled name; both are restored. The optional ``path`` parameter is a
    backward-compatible generalization (omitting it keeps the old lookup).
    """
    if path is None:
        path = os.path.join(os.path.dirname(__file__), "grid.txt")
    with open(path) as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0
    # Horizontal windows of four.
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp
    # Vertical windows of four.
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp
    # Down-right diagonals.
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # Down-left diagonals (need j - 3 >= 0).
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 531 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a : List[str] = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
a : Dict = {
"169M": 7_68,
"430M": 10_24,
"1B5": 20_48,
"3B": 25_60,
"7B": 40_96,
"14B": 51_20,
}
def convert_state_dict(state_dict):
    """Rename the keys of an RWKV checkpoint state dict, in place, to match
    the Hugging Face RWKV naming scheme, and return the same dict.

    BUG FIXES: in the checked-in version every local was bound to a
    throwaway name (`UpperCAmelCase`), the regex substitutions were applied
    to the dict argument instead of the key, and the final write referenced
    an undefined `weight` — the function crashed on any input. The name is
    restored from the call site in the converter below.
    """
    for old_key in list(state_dict.keys()):
        tensor = state_dict.pop(old_key)
        new_key = old_key
        # emb -> embedding
        if new_key.startswith("emb."):
            new_key = new_key.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if new_key.startswith("blocks.0.ln0"):
            new_key = new_key.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        new_key = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", new_key)
        # ffn -> feed_forward
        new_key = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", new_key)
        # time_mix_{k,v,r} -> time_mix_{key,value,receptance}
        for suffix, replacement in (
            (".time_mix_k", ".time_mix_key"),
            (".time_mix_v", ".time_mix_value"),
            (".time_mix_r", ".time_mix_receptance"),
        ):
            if new_key.endswith(suffix):
                new_key = new_key.replace(suffix, replacement)
        # Everything except the LM head lives under the `rwkv.` prefix.
        if new_key != "head.weight":
            new_key = "rwkv." + new_key
        state_dict[new_key] = tensor
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert it to the Hugging
    Face format (tokenizer, config, sharded weights) under ``output_dir``,
    and optionally push the result back to the Hub.

    BUG FIXES: the checked-in signature declared all seven parameters with
    the same placeholder name (a SyntaxError) and every local was bound to a
    throwaway name. The function/parameter names are restored from the
    ``__main__`` guard's call below; locals are restored from how they are
    consumed.

    Raises:
        ValueError: if the model size cannot be inferred or is unknown, or
            if ``push_to_hub`` is set without a ``model_name``.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        # Reload and re-save each shard so tensors no longer alias storage
        # from the original monolithic state dict.
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # BUG FIX: the parser and the parsed namespace were bound to a throwaway
    # name (`a`) while the lines below read `parser` and `args`, raising
    # NameError when the script ran.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for `ShapEPipeline`.

    NOTE(review): names in this class are restored from how the class uses
    itself (`self.pipeline_class`, `self.batch_params`, `self.dummy_prior`,
    `self.time_input_dim`, ...). The checked-in version bound every class
    attribute and method to a single mangled placeholder each, so later
    definitions shadowed earlier ones, one signature declared duplicate
    parameter names (a SyntaxError), and locals were read under names they
    were never bound to. The base class is restored from the
    `PipelineTesterMixin` import above.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    # Presumably the mixin's GPU-offload toggle — TODO confirm attribute name.
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        """Hidden size shared by the dummy CLIP text encoder and projection."""
        return 32

    @property
    def time_input_dim(self):
        """Embedding dim fed to the prior's time embedding."""
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        """Hidden width of the dummy NeRF renderer."""
        return 8

    @property
    def dummy_tokenizer(self):
        """Tiny random CLIP tokenizer from the testing hub."""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        """Small randomly-initialized CLIP text encoder (seeded for determinism)."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        """Small randomly-initialized PriorTransformer."""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        """Small randomly-initialized ShapERenderer."""
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full set of dummy pipeline components."""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''',
            num_train_timesteps=1024,
            prediction_type='''sample''',
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs (prompt, generator, steps, frame size) for a device/seed."""
        if str(device).startswith('''mps'''):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        """One-step CPU run: checks output shape and a pinned corner slice."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        """Batched calls produce per-sample-consistent outputs."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        """A batch of identical prompts matches the single-prompt result."""
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        """`num_images_per_prompt` multiplies the number of returned frames."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the real `openai/shap-e` checkpoint.

    NOTE(review): restored from a mangled version where this class reused the
    fast-test class's name (shadowing it at module level), both methods
    shared one name (shadowing `tearDown`), locals were read under names
    they were never bound to, and stray dataset text was fused onto the last
    line (a SyntaxError).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        """Full 64-step render of "a shark"; compared to a reference array."""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy'''
        )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            '''a shark''',
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='''np''',
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
'''simple docstring'''
def lowercase(a, b):
    """Return the bitwise AND of two non-negative ints as a binary string.

    The result is prefixed with "0b" and zero-filled to the width of the
    longer operand, e.g. ``lowercase(25, 32) == "0b000000"``.

    Raises:
        ValueError: if either input is negative.

    BUG FIX: the checked-in signature declared both parameters with the same
    placeholder name (a SyntaxError) and bound locals to throwaway names
    that the return expression never read; names are restored from the
    body's own references (`a_binary`, `b_binary`, `char_a`, `char_b`).
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679 | 0 |
def _SCREAMING_SNAKE_CASE(density, bulk_modulus):
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Args:
        density: fluid density; must be strictly positive.
        bulk_modulus: fluid bulk modulus; must be strictly positive.

    Raises:
        ValueError: if either quantity is not strictly positive.

    BUG FIX: the checked-in signature declared both parameters with the same
    placeholder name (a SyntaxError); names are restored from the body's own
    references.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used when building batches in the tests below,
# preferring torch, then TensorFlow, then JAX.
# BUG FIX: each branch previously bound the value to a throwaway name (`a`)
# while the test class reads `FRAMEWORK` (e.g. `if FRAMEWORK != "jax"`),
# which raised NameError.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Tokenizer test-suite for `PerceiverTokenizer`.

    NOTE(review): this class appears heavily name-mangled and cannot run as
    written — flagging rather than rewriting because the original names are
    not all recoverable from this file alone:
    - the base `lowercase__` is undefined here; `TokenizerTesterMixin` is
      imported above and is presumably the intended base — confirm;
    - every method is named `A_`, so each later definition shadows the
      previous one and only the final one survives on the class;
    - locals are bound to `UpperCAmelCase` but read back under other names
      (`tokenizer`, `encoded`, `toks`, ...), and `self.perceiver_tokenizer`
      is read but never assigned under that name;
    - one `def` below declares four parameters all named `snake_case`,
      which is a SyntaxError for the whole module.
    Docstrings below describe the apparent intent only.
    """

    # Presumably the mixin's `tokenizer_class` and `test_rust_tokenizer`
    # attributes — TODO confirm against TokenizerTesterMixin.
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def A_ ( self ):
        """Create a fresh PerceiverTokenizer and save it into the temp dir."""
        super().setUp()
        # NOTE(review): bound to a throwaway name but saved below as
        # `tokenizer` — NameError as written.
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def A_ ( self ):
        """The real pretrained Perceiver tokenizer (fetched from the Hub)."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )

    def A_ ( self , **snake_case ):
        """Reload a tokenizer from the temp dir, forwarding overrides."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )

    # NOTE(review): the four parameters below all share the name
    # `snake_case` — SyntaxError as written. From the common tokenizer test
    # suite the intent is presumably
    # (tokenizer, with_prefix_space=False, max_length=20, min_length=5);
    # confirm before repairing.
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        """Build a clean, decodable (text, ids) sample from tokenizer ids."""
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )

        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]

        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids

    def A_ ( self ):
        """Round-trip encode/decode, including multibyte (Unicode) characters."""
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )

        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )

        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )

    def A_ ( self ):
        """Batch encoding produces padded input_ids/attention_mask tensors."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )

        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )

        self.assertListEqual(snake_case , snake_case )

        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )

    def A_ ( self ):
        """Plain (non-seq2seq) batches carry no decoder_* keys."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )

    def A_ ( self ):
        """`text_target` with max_length padding yields fixed-width targets."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )

    def A_ ( self ):
        """Save/reload round-trip preserves encodings, added tokens and
        model_max_length overrides."""
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )

        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )

        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )

                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )

                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )

                shutil.rmtree(snake_case )

    def A_ ( self ):
        """additional_special_tokens can be edited in the saved JSON files
        and overridden again via from_pretrained."""
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )

                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )

                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )

                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]

                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )

                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    def A_ ( self ):
        """Decoding an out-of-range byte id yields the replacement character."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )

    def A_ ( self ):
        """Intentionally skipped for the byte-level Perceiver tokenizer."""
        pass

    def A_ ( self ):
        """Intentionally skipped for the byte-level Perceiver tokenizer."""
        pass

    def A_ ( self ):
        """Intentionally skipped for the byte-level Perceiver tokenizer."""
        pass

    def A_ ( self ):
        """Intentionally skipped for the byte-level Perceiver tokenizer."""
        pass

    def A_ ( self ):
        """convert_tokens_to_string returns a plain str for special+char tokens."""
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy import structure: submodule name -> public names it provides.
# BUG FIX: this dict (and every conditional addition below) was bound to a
# throwaway name (`__a`), while `_LazyModule` at the bottom consumed the
# never-defined name `_import_structure` — NameError on import, and the
# optional exports were silently dropped.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy proxy was assigned to a throwaway variable; it must
    # replace this module in sys.modules for lazy attribute access to work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 606 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# BUG FIX: both module constants below were bound to the same throwaway name
# `a`, so the logger was immediately shadowed by the dict. Restored to the
# conventional names.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    r"""Configuration for an EfficientFormer model.

    Stores the architecture hyper-parameters (stage depths, hidden sizes,
    downsampling layout, attention geometry, regularization and
    normalization settings) and forwards everything else to
    `PretrainedConfig`.

    NOTE(review): the checked-in signature declared every parameter with the
    same placeholder name (`snake_case`) — a SyntaxError. Parameter names
    below are restored from the body's attribute assignments, matched to the
    positional defaults; the base class is restored from the
    `PretrainedConfig` import at the top of the file.
    """

    # HF model-type registry key (the original attribute name was mangled).
    model_type = "efficientformer"

    def __init__(
        self,
        depths = [3, 2, 6, 4],
        hidden_sizes = [4_8, 9_6, 2_2_4, 4_4_8],
        downsamples = [True, True, True, True],
        dim = 4_4_8,
        key_dim = 3_2,
        attention_ratio = 4,
        resolution = 7,
        num_hidden_layers = 5,
        num_attention_heads = 8,
        mlp_expansion_ratio = 4,
        hidden_dropout_prob = 0.0,
        patch_size = 1_6,
        num_channels = 3,
        pool_size = 3,
        downsample_patch_size = 3,
        downsample_stride = 2,
        downsample_pad = 1,
        drop_path_rate = 0.0,
        num_metaad_blocks = 1,
        distillation = True,
        use_layer_scale = True,
        layer_scale_init_value = 1e-5,
        hidden_act = "gelu",
        initializer_range = 0.02,
        layer_norm_eps = 1e-12,
        image_size = 2_2_4,
        batch_norm_eps = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __UpperCamelCase :
    """Test helper that builds BeiT configs and dummy inputs for the model tests.

    NOTE(review): this block appears machine-mangled and is not runnable as-is:
    every constructor parameter is named ``_A`` (duplicate argument names are a
    SyntaxError in Python), every assignment binds a throwaway local
    ``_lowerCAmelCase`` instead of ``self.<attr>``, and the bare names the body
    reads (``parent``, ``batch_size``, ...) are never bound.  All five test
    methods also share the single name ``__lowerCamelCase``, so each later
    ``def`` silently shadows the previous one.  The docstrings below describe
    the apparent *intended* behaviour — confirm against the upstream
    ``tests/models/beit/test_modeling_beit.py`` before relying on them.
    """

    def __init__( self ,_A ,_A=100 ,_A=13 ,_A=30 ,_A=2 ,_A=3 ,_A=True ,_A=True ,_A=32 ,_A=4 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,_A=3 ,_A=None ,_A=[0, 1, 2, 3] ,):
        '''Record the test hyper-parameters (intended: ``self.batch_size = batch_size`` etc.).'''
        # NOTE(review): every assignment below discards its value into the same
        # local name; presumably each should read ``self.<name> = <name>``.
        # Also note the mutable default argument ``[0, 1, 2, 3]`` in the
        # signature — shared across calls if ever mutated.
        _lowerCAmelCase : List[str] = parent
        _lowerCAmelCase : Any = 100
        _lowerCAmelCase : int = batch_size
        _lowerCAmelCase : int = image_size
        _lowerCAmelCase : List[Any] = patch_size
        _lowerCAmelCase : Dict = num_channels
        _lowerCAmelCase : Tuple = is_training
        _lowerCAmelCase : List[Any] = use_labels
        _lowerCAmelCase : List[Any] = hidden_size
        _lowerCAmelCase : Union[str, Any] = num_hidden_layers
        _lowerCAmelCase : List[Any] = num_attention_heads
        _lowerCAmelCase : Dict = intermediate_size
        _lowerCAmelCase : Optional[Any] = hidden_act
        _lowerCAmelCase : Optional[int] = hidden_dropout_prob
        _lowerCAmelCase : Dict = attention_probs_dropout_prob
        _lowerCAmelCase : Tuple = type_sequence_label_size
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : Union[str, Any] = scope
        _lowerCAmelCase : int = out_indices
        _lowerCAmelCase : List[Any] = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        _lowerCAmelCase : Any = (image_size // patch_size) ** 2
        _lowerCAmelCase : Optional[Any] = num_patches + 1

    def __lowerCamelCase ( self ):
        '''Build dummy (config, pixel_values, labels, pixel_labels) test inputs.'''
        _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : Union[str, Any] = None
        if self.use_labels:
            # classification labels plus per-pixel segmentation labels
            _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        _lowerCAmelCase : Any = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def __lowerCamelCase ( self ):
        '''Return a BeitConfig built from the stored hyper-parameters.'''
        return BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_A ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)

    def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
        '''Run the base BeitModel and check the last-hidden-state shape.'''
        _lowerCAmelCase : Tuple = BeitModel(config=_A )
        model.to(_A )
        model.eval()
        _lowerCAmelCase : int = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
        '''Run BeitForMaskedImageModeling and check the logits shape (seq minus [CLS]).'''
        _lowerCAmelCase : Optional[int] = BeitForMaskedImageModeling(config=_A )
        model.to(_A )
        model.eval()
        _lowerCAmelCase : str = model(_A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )

    def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
        '''Run BeitForImageClassification on RGB and single-channel inputs.'''
        _lowerCAmelCase : Any = self.type_sequence_label_size
        _lowerCAmelCase : Dict = BeitForImageClassification(_A )
        model.to(_A )
        model.eval()
        _lowerCAmelCase : Optional[int] = model(_A ,labels=_A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        _lowerCAmelCase : List[Any] = 1
        _lowerCAmelCase : Union[str, Any] = BeitForImageClassification(_A )
        model.to(_A )
        model.eval()
        _lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _lowerCAmelCase : Any = model(_A ,labels=_A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
        '''Run BeitForSemanticSegmentation; logits are upsampled to 2x the input size.'''
        _lowerCAmelCase : Union[str, Any] = self.num_labels
        _lowerCAmelCase : Optional[Any] = BeitForSemanticSegmentation(_A )
        model.to(_A )
        model.eval()
        _lowerCAmelCase : str = model(_A )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        _lowerCAmelCase : Any = model(_A ,labels=_A )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )

    def __lowerCamelCase ( self ):
        '''Return (config, inputs_dict) as expected by the common test mixin.'''
        # NOTE(review): the unpacking below is mangled — ``config``,
        # ``pixel_values`` and ``config_and_inputs`` are never bound.
        _lowerCAmelCase : Dict = self.prepare_config_and_inputs()
        _lowerCAmelCase : Optional[Any] = config_and_inputs
        _lowerCAmelCase : Any = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
    """BeiT model test suite (common-model + pipeline mixins + unittest.TestCase).

    NOTE(review): this class looks machine-mangled.  All six class attributes
    share the single name ``_UpperCAmelCase`` (only the last assignment, a
    ``False``, survives), all test methods share the name ``__lowerCamelCase``
    (each later ``def`` shadows the previous), and several calls reference the
    unbound name ``_A``.  The base classes ``lowercase__`` are not defined in
    this file.  Docstrings below record the apparent intent only.
    """

    # NOTE(review): intended names are presumably all_model_classes,
    # pipeline_model_mapping, test_pruning, test_resize_embeddings,
    # test_head_masking — here they all collapse to one attribute.
    _UpperCAmelCase = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def __lowerCamelCase ( self ):
        '''setUp: create the model tester and the config tester.'''
        _lowerCAmelCase : Optional[Any] = BeitModelTester(self )
        _lowerCAmelCase : int = ConfigTester(self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )

    def __lowerCamelCase ( self ):
        '''Run the common BeitConfig tests.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds' )
    def __lowerCamelCase ( self ):
        '''Skipped: BEiT has no inputs_embeds.'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def __lowerCamelCase ( self ):
        '''Skipped: incompatible with nn.DataParallel.'''
        pass

    def __lowerCamelCase ( self ):
        '''Check input embeddings are an nn.Module and output embeddings are None/Linear.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : int = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            _lowerCAmelCase : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A ,nn.Linear ) )

    def __lowerCamelCase ( self ):
        '''Check the forward signature starts with ``pixel_values``.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : int = model_class(_A )
            _lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : List[str] = [*signature.parameters.keys()]
            _lowerCAmelCase : List[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,_A )

    def __lowerCamelCase ( self ):
        '''Delegate to the tester's base-model check.'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def __lowerCamelCase ( self ):
        '''Delegate to the tester's masked-image-modeling check.'''
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_A )

    def __lowerCamelCase ( self ):
        '''Delegate to the tester's image-classification check.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    def __lowerCamelCase ( self ):
        '''Delegate to the tester's semantic-segmentation check.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_A )

    def __lowerCamelCase ( self ):
        '''Train one step per model class and check the loss backpropagates.'''
        if not self.model_tester.is_training:
            return
        _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase : Union[str, Any] = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(_A ), BeitForMaskedImageModeling]:
                continue
            _lowerCAmelCase : Dict = model_class(_A )
            model.to(_A )
            model.train()
            _lowerCAmelCase : Dict = self._prepare_for_class(_A ,_A ,return_labels=_A )
            _lowerCAmelCase : Optional[int] = model(**_A ).loss
            loss.backward()

    def __lowerCamelCase ( self ):
        '''Same training check but with gradient checkpointing enabled.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        _lowerCAmelCase : Union[str, Any] = False
        _lowerCAmelCase : List[str] = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(_A ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            _lowerCAmelCase : List[str] = model_class(_A )
            model.gradient_checkpointing_enable()
            model.to(_A )
            model.train()
            _lowerCAmelCase : Tuple = self._prepare_for_class(_A ,_A ,return_labels=_A )
            _lowerCAmelCase : Tuple = model(**_A ).loss
            loss.backward()

    def __lowerCamelCase ( self ):
        '''With zero-init config, every trainable parameter mean must round to 0.0 or 1.0.'''
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase : List[str] = _config_zero_init(_A )
        for model_class in self.all_model_classes:
            _lowerCAmelCase : List[Any] = model_class(config=_A )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)

    @slow
    def __lowerCamelCase ( self ):
        '''Smoke-test loading a pretrained checkpoint from the hub.'''
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : int = BeitModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
    """Load the COCO cats fixture image used by the BeiT integration tests.

    Returns:
        The decoded ``PIL.Image.Image`` fixture.
    """
    # Bug fix: the original bound the opened image to a throwaway local
    # (``_lowerCAmelCase``) and then returned the undefined name ``image``,
    # raising NameError at call time.
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    """Slow BeiT integration tests against pretrained hub checkpoints.

    NOTE(review): machine-mangled like the rest of the file — every test
    method shares the name ``__lowerCamelCase`` (later defs shadow earlier
    ones), locals are discarded into ``_lowerCAmelCase``, and several calls
    pass the unbound name ``_A`` where concrete values belong.  Docstrings
    below record the apparent intent only.
    """

    @cached_property
    def __lowerCamelCase ( self ):
        '''Default image processor for the base BeiT checkpoint (None without vision deps).'''
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def __lowerCamelCase ( self ):
        '''Masked-image-modeling logits: check shape (1, 196, 8192) and a 3x3 slice.'''
        _lowerCAmelCase : Tuple = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_A )
        _lowerCAmelCase : Any = self.default_image_processor
        _lowerCAmelCase : str = prepare_img()
        _lowerCAmelCase : str = image_processor(images=_A ,return_tensors='pt' ).pixel_values.to(_A )
        # prepare bool_masked_pos
        _lowerCAmelCase : Optional[int] = torch.ones((1, 196) ,dtype=torch.bool ).to(_A )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Optional[Any] = model(pixel_values=_A ,bool_masked_pos=_A )
        _lowerCAmelCase : Optional[Any] = outputs.logits
        # verify the logits
        _lowerCAmelCase : Tuple = torch.Size((1, 196, 8192) )
        self.assertEqual(logits.shape ,_A )
        _lowerCAmelCase : List[Any] = torch.tensor(
            [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(_A )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,_A ,atol=1E-2 ) )

    @slow
    def __lowerCamelCase ( self ):
        '''ImageNet-1k classification: check logits shape, slice, and argmax class 281.'''
        _lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_A )
        _lowerCAmelCase : List[str] = self.default_image_processor
        _lowerCAmelCase : Optional[Any] = prepare_img()
        _lowerCAmelCase : Optional[int] = image_processor(images=_A ,return_tensors='pt' ).to(_A )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Dict = model(**_A )
        _lowerCAmelCase : Tuple = outputs.logits
        # verify the logits
        _lowerCAmelCase : Union[str, Any] = torch.Size((1, 1000) )
        self.assertEqual(logits.shape ,_A )
        _lowerCAmelCase : Optional[int] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_A )
        self.assertTrue(torch.allclose(logits[0, :3] ,_A ,atol=1E-4 ) )
        _lowerCAmelCase : Tuple = 281
        self.assertEqual(logits.argmax(-1 ).item() ,_A )

    @slow
    def __lowerCamelCase ( self ):
        '''ImageNet-22k classification: check logits shape, slice, and argmax class 2396.'''
        _lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
            _A )
        _lowerCAmelCase : Dict = self.default_image_processor
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : Tuple = image_processor(images=_A ,return_tensors='pt' ).to(_A )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : int = model(**_A )
        _lowerCAmelCase : Optional[int] = outputs.logits
        # verify the logits
        _lowerCAmelCase : int = torch.Size((1, 2_1841) )
        self.assertEqual(logits.shape ,_A )
        _lowerCAmelCase : List[str] = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_A )
        self.assertTrue(torch.allclose(logits[0, :3] ,_A ,atol=1E-4 ) )
        _lowerCAmelCase : Optional[Any] = 2396
        self.assertEqual(logits.argmax(-1 ).item() ,_A )

    @slow
    def __lowerCamelCase ( self ):
        '''ADE20k semantic segmentation: expected 3x3x3 logit slices differ by Pillow version.'''
        _lowerCAmelCase : List[str] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
        _lowerCAmelCase : Optional[int] = model.to(_A )
        _lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=_A ,size=640 ,do_center_crop=_A )
        _lowerCAmelCase : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
        _lowerCAmelCase : Dict = Image.open(ds[0]['file'] )
        _lowerCAmelCase : Union[str, Any] = image_processor(images=_A ,return_tensors='pt' ).to(_A )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Optional[Any] = model(**_A )
        _lowerCAmelCase : Optional[int] = outputs.logits
        # verify the logits
        _lowerCAmelCase : Dict = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape ,_A )
        # Pillow < 9.0.0 resizes slightly differently, hence two reference tensors
        _lowerCAmelCase : str = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
        if is_pillow_less_than_a:
            _lowerCAmelCase : Any = torch.tensor(
                [
                    [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
                    [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
                    [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
                ] ,device=_A ,)
        else:
            _lowerCAmelCase : Any = torch.tensor(
                [
                    [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
                    [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
                    [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
                ] ,device=_A ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_A ,atol=1E-4 ) )

    @slow
    def __lowerCamelCase ( self ):
        '''post_process_semantic_segmentation: output sizes with and without target_sizes.'''
        _lowerCAmelCase : List[str] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
        _lowerCAmelCase : Dict = model.to(_A )
        _lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=_A ,size=640 ,do_center_crop=_A )
        _lowerCAmelCase : int = load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
        _lowerCAmelCase : Tuple = Image.open(ds[0]['file'] )
        _lowerCAmelCase : Union[str, Any] = image_processor(images=_A ,return_tensors='pt' ).to(_A )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : List[Any] = model(**_A )
        _lowerCAmelCase : Dict = outputs.logits.detach().cpu()
        _lowerCAmelCase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_A ,target_sizes=[(500, 300)] )
        _lowerCAmelCase : Optional[Any] = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape ,_A )
        _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=_A )
        _lowerCAmelCase : str = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape ,_A )
| 259 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
    """Test helper that builds ResNet configs and dummy TF inputs for the model tests.

    NOTE(review): machine-mangled and not runnable as-is — every parameter is
    named ``snake_case`` (duplicate argument names are a SyntaxError), every
    assignment discards its value into the local ``UpperCAmelCase`` instead of
    ``self.<attr>``, and all helper methods share the single name ``A_`` (each
    later ``def`` shadows the previous).  Docstrings below record the apparent
    intent only — confirm against the upstream
    ``tests/models/resnet/test_modeling_tf_resnet.py``.
    """

    def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ):
        '''Record the test hyper-parameters (intended: ``self.batch_size = batch_size`` etc.).'''
        # NOTE(review): each line below should presumably be ``self.<name> = <name>``;
        # also note the mutable list default arguments in the signature.
        UpperCAmelCase : Dict = parent
        UpperCAmelCase : int = batch_size
        UpperCAmelCase : Union[str, Any] = image_size
        UpperCAmelCase : Union[str, Any] = num_channels
        UpperCAmelCase : List[str] = embeddings_size
        UpperCAmelCase : Any = hidden_sizes
        UpperCAmelCase : int = depths
        UpperCAmelCase : List[str] = is_training
        UpperCAmelCase : List[str] = use_labels
        UpperCAmelCase : int = hidden_act
        UpperCAmelCase : Union[str, Any] = num_labels
        UpperCAmelCase : str = scope
        UpperCAmelCase : str = len(snake_case )

    def A_ ( self ):
        '''Build dummy (config, pixel_values, labels) test inputs.'''
        UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase : List[Any] = None
        if self.use_labels:
            UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, labels

    def A_ ( self ):
        '''Return a ResNetConfig built from the stored hyper-parameters.'''
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def A_ ( self , snake_case , snake_case , snake_case ):
        '''Run the base TFResNetModel and check the feature-map shape (B, C, H//32, W//32).'''
        UpperCAmelCase : List[Any] = TFResNetModel(config=snake_case )
        UpperCAmelCase : int = model(snake_case )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def A_ ( self , snake_case , snake_case , snake_case ):
        '''Run TFResNetForImageClassification and check the logits shape.'''
        UpperCAmelCase : List[str] = self.num_labels
        UpperCAmelCase : List[Any] = TFResNetForImageClassification(snake_case )
        UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A_ ( self ):
        '''Return (config, inputs_dict) as expected by the common test mixin.'''
        # NOTE(review): the unpacking below is mangled — ``config_and_inputs``,
        # ``config`` and ``pixel_values`` are never actually bound.
        UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs
        UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """TF ResNet model test suite (common-model + pipeline mixins + unittest.TestCase).

    NOTE(review): machine-mangled — all seven ``SCREAMING_SNAKE_CASE__`` class
    attributes share one name (only the final ``False`` survives), all test
    methods share the name ``A_`` (each later ``def`` shadows the previous),
    and the base classes ``lowercase__`` are not defined in this file.
    Docstrings below record the apparent intent only.
    """

    # NOTE(review): intended names are presumably all_model_classes,
    # pipeline_model_mapping, test_pruning, test_resize_embeddings,
    # test_head_masking, test_onnx, has_attentions — collapsed to one here.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False

    def A_ ( self ):
        '''setUp: create the model tester and the config tester.'''
        UpperCAmelCase : Dict = TFResNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )

    def A_ ( self ):
        '''Run the individual ResNetConfig tests.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A_ ( self ):
        '''Placeholder for config common-properties check (intentionally empty).'''
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds" )
    def A_ ( self ):
        '''Skipped: ResNet has no inputs_embeds.'''
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings" )
    def A_ ( self ):
        '''Skipped: ResNet has no input/output embeddings.'''
        pass

    def A_ ( self ):
        '''Check the call signature starts with ``pixel_values``.'''
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : Dict = model_class(snake_case )
            UpperCAmelCase : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )

    def A_ ( self ):
        '''Delegate to the tester's base-model check.'''
        UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def A_ ( self ):
        '''Check hidden_states output count/shape for both basic and bottleneck layers.'''
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase : List[str] = self.model_tester.num_stages
            self.assertEqual(len(snake_case ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase : str = layer_type
                UpperCAmelCase : Optional[Any] = True
                check_hidden_states_output(snake_case , snake_case , snake_case )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase : str = True
                check_hidden_states_output(snake_case , snake_case , snake_case )

    def A_ ( self ):
        '''Delegate to the tester's image-classification check.'''
        UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case )

    @slow
    def A_ ( self ):
        '''Smoke-test loading a pretrained checkpoint from the hub.'''
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def lowercase ( ):
    """Load the COCO cats fixture image used by the TF ResNet integration tests.

    Returns:
        The decoded ``PIL.Image.Image`` fixture.
    """
    # Bug fix: the original bound the opened image to a throwaway local
    # (``UpperCAmelCase``) and then returned the undefined name ``image``,
    # raising NameError at call time.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow TF ResNet integration test against the first pretrained hub checkpoint.

    NOTE(review): machine-mangled like the rest of the file — locals are
    discarded into ``UpperCAmelCase`` and the final assertions reference
    ``snake_case``, a name that is unbound in these methods.  Docstrings below
    record the apparent intent only.
    """

    @cached_property
    def A_ ( self ):
        '''Default image processor for the first TF ResNet checkpoint (None without vision deps).'''
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A_ ( self ):
        '''Classification head: check logits shape (1, 1000) and a 3-value slice.'''
        UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        UpperCAmelCase : Union[str, Any] = self.default_image_processor
        UpperCAmelCase : Tuple = prepare_img()
        UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" )
        # forward pass
        UpperCAmelCase : Any = model(**snake_case )
        # verify the logits
        UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
| 679 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.